From e82da1098316bea42aa12f1456315e099b989eab Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Han?=
Date: Thu, 9 Dec 2021 17:16:05 +0100
Subject: [PATCH] subvolumegroup: add new crd
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This introduces a new CRD that adds the ability to create a subvolume
group for a given Ceph filesystem volume. Typically, the name of the
volume is the name of the filesystem created by Rook.

Closes: https://github.com/rook/rook/issues/7036
Signed-off-by: Sébastien Han
---
 .commitlintrc.json | 1 +
 .github/workflows/canary-integration-test.yml | 5 +
 Documentation/ceph-fs-subvolumegroup.md | 42 +++
 .../charts/rook-ceph/templates/resources.yaml | 89 ++++++
 deploy/examples/crds.yaml | 88 ++++++
 deploy/examples/subvolumegroup.yaml | 9 +
 deploy/olm/assemble/metadata-common.yaml | 5 +
 pkg/apis/ceph.rook.io/v1/filesystem.go | 21 ++
 pkg/apis/ceph.rook.io/v1/register.go | 2 +
 pkg/apis/ceph.rook.io/v1/types.go | 43 +++
 .../ceph.rook.io/v1/zz_generated.deepcopy.go | 111 +++++++
 .../ceph.rook.io/v1/ceph.rook.io_client.go | 5 +
 .../v1/cephfilesystemsubvolumegroup.go | 178 +++++++++++
 .../v1/fake/fake_ceph.rook.io_client.go | 4 +
 .../fake/fake_cephfilesystemsubvolumegroup.go | 130 ++++++++
 .../ceph.rook.io/v1/generated_expansion.go | 2 +
 .../v1/cephfilesystemsubvolumegroup.go | 90 ++++++
 .../ceph.rook.io/v1/interface.go | 7 +
 .../informers/externalversions/generic.go | 2 +
 .../v1/cephfilesystemsubvolumegroup.go | 99 +++++++
 .../ceph.rook.io/v1/expansion_generated.go | 8 +
 pkg/daemon/ceph/client/subvolumegroup.go | 56 ++++
 pkg/operator/ceph/cluster/dependents.go | 1 +
 pkg/operator/ceph/controller/predicate.go | 25 ++
 pkg/operator/ceph/cr_manager.go | 2 +
 pkg/operator/ceph/file/controller.go | 20 +-
 pkg/operator/ceph/file/dependent.go | 51 ++++
 pkg/operator/ceph/file/dependent_test.go | 94 ++++++
 .../ceph/file/subvolumegroup/controller.go | 280 ++++++++++++++++++
 .../file/subvolumegroup/controller_test.go | 220 ++++++++++++++
 tests/scripts/github-action-helper.sh | 1 +
 31 files changed, 1690 insertions(+), 1 deletion(-)
 create mode 100644 Documentation/ceph-fs-subvolumegroup.md
 create mode 100644 deploy/examples/subvolumegroup.yaml
 create mode 100644 pkg/apis/ceph.rook.io/v1/filesystem.go
 create mode 100644 pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephfilesystemsubvolumegroup.go
 create mode 100644 pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephfilesystemsubvolumegroup.go
 create mode 100644 pkg/client/informers/externalversions/ceph.rook.io/v1/cephfilesystemsubvolumegroup.go
 create mode 100644 pkg/client/listers/ceph.rook.io/v1/cephfilesystemsubvolumegroup.go
 create mode 100644 pkg/daemon/ceph/client/subvolumegroup.go
 create mode 100644 pkg/operator/ceph/file/dependent.go
 create mode 100644 pkg/operator/ceph/file/dependent_test.go
 create mode 100644 pkg/operator/ceph/file/subvolumegroup/controller.go
 create mode 100644 pkg/operator/ceph/file/subvolumegroup/controller_test.go

diff --git a/.commitlintrc.json b/.commitlintrc.json
index 4af9f92268659..3c36aaa84fbda 100644
--- a/.commitlintrc.json
+++ b/.commitlintrc.json
@@ -29,6 +29,7 @@
 "rbd-mirror",
 "rgw",
 "security",
+ "subvolumegroup",
 "test"
 ]
 ],
diff --git a/.github/workflows/canary-integration-test.yml b/.github/workflows/canary-integration-test.yml
index 85d226a7b3168..55c75974b2cf9 100644
--- a/.github/workflows/canary-integration-test.yml
+++ b/.github/workflows/canary-integration-test.yml
@@ -71,6 +71,11 @@ jobs:
 # copy the test file
 # execute the test file
+ - name: wait for the subvolumegroup to be created
+ run: |
+ toolbox=$(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[*].metadata.name}')
+ timeout 15 sh -c "until kubectl -n rook-ceph exec $toolbox -- ceph fs subvolumegroup ls myfs | jq -r '.[0].name' | grep -q group-a; do sleep 1 && echo 'waiting for the subvolumegroup to be created'; done"
+
 - name: check-ownerreferences
 run: tests/scripts/github-action-helper.sh check_ownerreferences
diff --git a/Documentation/ceph-fs-subvolumegroup.md b/Documentation/ceph-fs-subvolumegroup.md
new file mode 100644
index 0000000000000..d694d12e85a36
--- /dev/null
+++ b/Documentation/ceph-fs-subvolumegroup.md
@@ -0,0 +1,42 @@
+---
+title: SubVolumeGroup CRD
+weight: 3610
+indent: true
+---
+
+{% include_relative branch.liquid %}
+
+This guide assumes you have created a Rook cluster as explained in the main [Quickstart guide](quickstart.md).
+
+# CephFilesystemSubVolumeGroup CRD
+
+Rook allows creation of Ceph Filesystem [SubVolumeGroups](https://docs.ceph.com/en/latest/cephfs/fs-volumes/#fs-subvolume-groups) through a custom resource definition (CRD).
+Filesystem subvolume groups are an abstraction for a directory level higher than Filesystem subvolumes, used to effect policies (e.g., file layouts) across a set of subvolumes.
+For more information about CephFS volumes, subvolume groups and subvolumes, refer to the [Ceph docs](https://docs.ceph.com/en/latest/cephfs/fs-volumes/#fs-volumes-and-subvolumes).
+
+## Creating a subvolume group
+
+To get you started, here is a simple example of a custom resource to create a subvolume group on the CephFilesystem "myfs".
+
+```yaml
+apiVersion: ceph.rook.io/v1
+kind: CephFilesystemSubVolumeGroup
+metadata:
+ name: group-a
+ namespace: rook-ceph # namespace:cluster
+spec:
+ # filesystemName is the metadata name of the CephFilesystem CR where the subvolume group will be created
+ filesystemName: myfs
+```
+
+## Settings
+
+If any setting is unspecified, a suitable default will be used automatically.
+
+### CephFilesystemSubVolumeGroup metadata
+
+- `name`: The name that will be used for the Ceph Filesystem subvolume group.
+
+### CephFilesystemSubVolumeGroup spec
+
+- `filesystemName`: The metadata name of the CephFilesystem CR where the subvolume group will be created.
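+
+## Verifying the subvolume group
+
+Under the hood, Rook simply runs the equivalent of `ceph fs subvolumegroup create myfs group-a` (see `pkg/daemon/ceph/client/subvolumegroup.go` in this change). As an optional sanity check, list the groups from the toolbox pod and confirm `group-a` appears in the output; the command below assumes the toolbox is deployed under its default name `rook-ceph-tools`:
+
+```console
+kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph fs subvolumegroup ls myfs
+```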
diff --git a/deploy/charts/rook-ceph/templates/resources.yaml b/deploy/charts/rook-ceph/templates/resources.yaml
index bc90f8d8019a4..3ecbb9c22f602 100644
--- a/deploy/charts/rook-ceph/templates/resources.yaml
+++ b/deploy/charts/rook-ceph/templates/resources.yaml
@@ -5816,6 +5816,28 @@ spec:
 status:
 description: CephFilesystemStatus represents the status of a Ceph Filesystem
 properties:
+ conditions:
+ items:
+ description: Condition represents a status condition on any Rook-Ceph Custom Resource.
+ properties:
+ lastHeartbeatTime:
+ format: date-time
+ type: string
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ type: string
+ reason:
+ description: ConditionReason is a reason for a condition
+ type: string
+ status:
+ type: string
+ type:
+ description: ConditionType represent a resource's status
+ type: string
+ type: object
+ type: array
 info:
 additionalProperties:
 type: string
@@ -5979,6 +6001,73 @@ status:
 ---
 apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c
+ helm.sh/resource-policy: keep
+ creationTimestamp: null
+ name: cephfilesystemsubvolumegroups.ceph.rook.io
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephFilesystemSubVolumeGroup
+ listKind: CephFilesystemSubVolumeGroupList
+ plural: cephfilesystemsubvolumegroups
+ singular: cephfilesystemsubvolumegroup
+ scope: Namespaced
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: CephFilesystemSubVolumeGroup represents a Ceph Filesystem SubVolumeGroup
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: Spec represents the specification of a Ceph Filesystem SubVolumeGroup
+ properties:
+ filesystemName:
+ description: FilesystemName is the name of the Ceph Filesystem volume in which the subvolume group will be created. Typically it's the name of the CephFilesystem CR.
+ type: string
+ required:
+ - filesystemName
+ type: object
+ status:
+ description: Status represents the status of a CephFilesystem SubvolumeGroup
+ properties:
+ info:
+ additionalProperties:
+ type: string
+ nullable: true
+ type: object
+ phase:
+ description: ConditionType represent a resource's status
+ type: string
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
 metadata:
 annotations:
 controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c
diff --git a/deploy/examples/crds.yaml b/deploy/examples/crds.yaml
index 444551b709835..15df344c60aee 100644
--- a/deploy/examples/crds.yaml
+++ b/deploy/examples/crds.yaml
@@ -5813,6 +5813,28 @@ spec:
 status:
 description: CephFilesystemStatus represents the status of a Ceph Filesystem
 properties:
+ conditions:
+ items:
+ description: Condition represents a status condition on any Rook-Ceph Custom Resource.
+ properties:
+ lastHeartbeatTime:
+ format: date-time
+ type: string
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ type: string
+ reason:
+ description: ConditionReason is a reason for a condition
+ type: string
+ status:
+ type: string
+ type:
+ description: ConditionType represent a resource's status
+ type: string
+ type: object
+ type: array
 info:
 additionalProperties:
 type: string
@@ -5976,6 +5998,72 @@ status:
 ---
 apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c
+ creationTimestamp: null
+ name: cephfilesystemsubvolumegroups.ceph.rook.io
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephFilesystemSubVolumeGroup
+ listKind: CephFilesystemSubVolumeGroupList
+ plural: cephfilesystemsubvolumegroups
+ singular: cephfilesystemsubvolumegroup
+ scope: Namespaced
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: CephFilesystemSubVolumeGroup represents a Ceph Filesystem SubVolumeGroup
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: Spec represents the specification of a Ceph Filesystem SubVolumeGroup
+ properties:
+ filesystemName:
+ description: FilesystemName is the name of the Ceph Filesystem volume in which the subvolume group will be created. Typically it's the name of the CephFilesystem CR.
+ type: string + required: + - filesystemName + type: object + status: + description: Status represents the status of a CephFilesystem SubvolumeGroup + properties: + info: + additionalProperties: + type: string + nullable: true + type: object + phase: + description: ConditionType represent a resource's status + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c diff --git a/deploy/examples/subvolumegroup.yaml b/deploy/examples/subvolumegroup.yaml new file mode 100644 index 0000000000000..bfa0d59ffb112 --- /dev/null +++ b/deploy/examples/subvolumegroup.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: ceph.rook.io/v1 +kind: CephFilesystemSubVolumeGroup +metadata: + name: group-a + namespace: rook-ceph # namespace:cluster +spec: + # filesystemName is the metadata name of the CephFilesystem CR where the subvolume group will be created + filesystemName: myfs diff --git a/deploy/olm/assemble/metadata-common.yaml b/deploy/olm/assemble/metadata-common.yaml index 5faa9a99c5b67..52c17383a7ded 100644 --- a/deploy/olm/assemble/metadata-common.yaml +++ b/deploy/olm/assemble/metadata-common.yaml @@ -163,6 +163,11 @@ spec: version: v1 displayName: Ceph Bucket Topic description: Represents a Ceph Bucket Topic. + - kind: CephFilesystemSubVolumeGroup + name: cephfilesystemsubvolumegroups.ceph.rook.io + version: v1 + displayName: Ceph Filesystem SubVolumeGroup + description: Represents a Ceph Filesystem SubVolumeGroup. displayName: Rook-Ceph description: | diff --git a/pkg/apis/ceph.rook.io/v1/filesystem.go b/pkg/apis/ceph.rook.io/v1/filesystem.go new file mode 100644 index 0000000000000..d5128ec11da62 --- /dev/null +++ b/pkg/apis/ceph.rook.io/v1/filesystem.go @@ -0,0 +1,21 @@ +/* +Copyright 2021 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +    http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package v1
+
+func (c *CephFilesystem) GetStatusConditions() *[]Condition {
+ return &c.Status.Conditions
+}
diff --git a/pkg/apis/ceph.rook.io/v1/register.go b/pkg/apis/ceph.rook.io/v1/register.go
index ef876653c6dd4..ef6b5aca117cf 100644
--- a/pkg/apis/ceph.rook.io/v1/register.go
+++ b/pkg/apis/ceph.rook.io/v1/register.go
@@ -83,6 +83,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
 &CephRBDMirrorList{},
 &CephFilesystemMirror{},
 &CephFilesystemMirrorList{},
+ &CephFilesystemSubVolumeGroup{},
+ &CephFilesystemSubVolumeGroupList{},
 )
 metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
 scheme.AddKnownTypes(bktv1alpha1.SchemeGroupVersion,
diff --git a/pkg/apis/ceph.rook.io/v1/types.go b/pkg/apis/ceph.rook.io/v1/types.go
index bd1a0ad5a692a..90c5c34b4905d 100755
--- a/pkg/apis/ceph.rook.io/v1/types.go
+++ b/pkg/apis/ceph.rook.io/v1/types.go
@@ -1096,6 +1096,7 @@ type CephFilesystemStatus struct {
 // MirroringStatus is the filesystem mirroring status
 // +optional
 MirroringStatus *FilesystemMirroringInfoSpec `json:"mirroringStatus,omitempty"`
+ Conditions []Condition `json:"conditions,omitempty"`
 }
 
 // FilesystemMirroringInfo is the status of the pool mirroring
@@ -2302,3 +2303,45 @@ type StorageClassDeviceSet struct {
 // +optional
 Encrypted bool `json:"encrypted,omitempty"`
 }
+
+// +genclient
+// +genclient:noStatus
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// CephFilesystemSubVolumeGroup represents a Ceph Filesystem SubVolumeGroup
+// +kubebuilder:subresource:status
+type CephFilesystemSubVolumeGroup struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata"`
+ // Spec represents the specification of a Ceph Filesystem SubVolumeGroup
+ Spec CephFilesystemSubVolumeGroupSpec `json:"spec"`
+ // Status represents the status of a CephFilesystem SubvolumeGroup
+ // +kubebuilder:pruning:PreserveUnknownFields
+ // +optional
+ Status *CephFilesystemSubVolumeGroupStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// CephFilesystemSubVolumeGroupList represents a list of Ceph Filesystem SubVolumeGroups
+type CephFilesystemSubVolumeGroupList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+ Items []CephFilesystemSubVolumeGroup `json:"items"`
+}
+
+// CephFilesystemSubVolumeGroupSpec represents the specification of a Ceph Filesystem SubVolumeGroup
+type CephFilesystemSubVolumeGroupSpec struct {
+ // FilesystemName is the name of the Ceph Filesystem volume in which the subvolume group will be
+ // created. Typically it's the name of the CephFilesystem CR.
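+ // If the CR name is not known, the existing volume names can be listed with `ceph fs volume ls`.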
+ FilesystemName string `json:"filesystemName"` +} + +// CephFilesystemSubVolumeGroupStatus represents the Status of Ceph Filesystem SubVolumeGroup +type CephFilesystemSubVolumeGroupStatus struct { + // +optional + Phase ConditionType `json:"phase,omitempty"` + // +optional + // +nullable + Info map[string]string `json:"info,omitempty"` +} diff --git a/pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go b/pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go index b71c459e8a1e7..238ed95adf38f 100644 --- a/pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go +++ b/pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go @@ -855,6 +855,13 @@ func (in *CephFilesystemStatus) DeepCopyInto(out *CephFilesystemStatus) { *out = new(FilesystemMirroringInfoSpec) (*in).DeepCopyInto(*out) } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -868,6 +875,110 @@ func (in *CephFilesystemStatus) DeepCopy() *CephFilesystemStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephFilesystemSubVolumeGroup) DeepCopyInto(out *CephFilesystemSubVolumeGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(CephFilesystemSubVolumeGroupStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFilesystemSubVolumeGroup. +func (in *CephFilesystemSubVolumeGroup) DeepCopy() *CephFilesystemSubVolumeGroup { + if in == nil { + return nil + } + out := new(CephFilesystemSubVolumeGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephFilesystemSubVolumeGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephFilesystemSubVolumeGroupList) DeepCopyInto(out *CephFilesystemSubVolumeGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CephFilesystemSubVolumeGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFilesystemSubVolumeGroupList. +func (in *CephFilesystemSubVolumeGroupList) DeepCopy() *CephFilesystemSubVolumeGroupList { + if in == nil { + return nil + } + out := new(CephFilesystemSubVolumeGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephFilesystemSubVolumeGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CephFilesystemSubVolumeGroupSpec) DeepCopyInto(out *CephFilesystemSubVolumeGroupSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFilesystemSubVolumeGroupSpec. +func (in *CephFilesystemSubVolumeGroupSpec) DeepCopy() *CephFilesystemSubVolumeGroupSpec { + if in == nil { + return nil + } + out := new(CephFilesystemSubVolumeGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephFilesystemSubVolumeGroupStatus) DeepCopyInto(out *CephFilesystemSubVolumeGroupStatus) { + *out = *in + if in.Info != nil { + in, out := &in.Info, &out.Info + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFilesystemSubVolumeGroupStatus. +func (in *CephFilesystemSubVolumeGroupStatus) DeepCopy() *CephFilesystemSubVolumeGroupStatus { + if in == nil { + return nil + } + out := new(CephFilesystemSubVolumeGroupStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CephHealthMessage) DeepCopyInto(out *CephHealthMessage) { *out = *in diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/ceph.rook.io_client.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/ceph.rook.io_client.go index 78f56ce4fbdd9..c4d622edabbd9 100644 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/ceph.rook.io_client.go +++ b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/ceph.rook.io_client.go @@ -33,6 +33,7 @@ type CephV1Interface interface { CephClustersGetter CephFilesystemsGetter CephFilesystemMirrorsGetter + CephFilesystemSubVolumeGroupsGetter CephNFSesGetter CephObjectRealmsGetter CephObjectStoresGetter @@ -75,6 +76,10 @@ func (c *CephV1Client) CephFilesystemMirrors(namespace string) CephFilesystemMir return newCephFilesystemMirrors(c, namespace) } +func (c *CephV1Client) CephFilesystemSubVolumeGroups(namespace string) CephFilesystemSubVolumeGroupInterface { + return newCephFilesystemSubVolumeGroups(c, namespace) +} + func (c *CephV1Client) CephNFSes(namespace string) CephNFSInterface { return newCephNFSes(c, namespace) } diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephfilesystemsubvolumegroup.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephfilesystemsubvolumegroup.go new file mode 100644 index 0000000000000..80a66e56aa804 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephfilesystemsubvolumegroup.go @@ -0,0 +1,178 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "context" + "time" + + v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CephFilesystemSubVolumeGroupsGetter has a method to return a CephFilesystemSubVolumeGroupInterface. +// A group's client should implement this interface. +type CephFilesystemSubVolumeGroupsGetter interface { + CephFilesystemSubVolumeGroups(namespace string) CephFilesystemSubVolumeGroupInterface +} + +// CephFilesystemSubVolumeGroupInterface has methods to work with CephFilesystemSubVolumeGroup resources. +type CephFilesystemSubVolumeGroupInterface interface { + Create(ctx context.Context, cephFilesystemSubVolumeGroup *v1.CephFilesystemSubVolumeGroup, opts metav1.CreateOptions) (*v1.CephFilesystemSubVolumeGroup, error) + Update(ctx context.Context, cephFilesystemSubVolumeGroup *v1.CephFilesystemSubVolumeGroup, opts metav1.UpdateOptions) (*v1.CephFilesystemSubVolumeGroup, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CephFilesystemSubVolumeGroup, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.CephFilesystemSubVolumeGroupList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephFilesystemSubVolumeGroup, err error) + CephFilesystemSubVolumeGroupExpansion +} + +// cephFilesystemSubVolumeGroups implements CephFilesystemSubVolumeGroupInterface +type cephFilesystemSubVolumeGroups struct { + client rest.Interface + ns string +} + +// newCephFilesystemSubVolumeGroups returns a CephFilesystemSubVolumeGroups +func newCephFilesystemSubVolumeGroups(c *CephV1Client, namespace string) *cephFilesystemSubVolumeGroups { + return &cephFilesystemSubVolumeGroups{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the cephFilesystemSubVolumeGroup, and returns the corresponding cephFilesystemSubVolumeGroup object, and an error if there is any. +func (c *cephFilesystemSubVolumeGroups) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CephFilesystemSubVolumeGroup, err error) { + result = &v1.CephFilesystemSubVolumeGroup{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephfilesystemsubvolumegroups"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CephFilesystemSubVolumeGroups that match those selectors. +func (c *cephFilesystemSubVolumeGroups) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CephFilesystemSubVolumeGroupList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.CephFilesystemSubVolumeGroupList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephfilesystemsubvolumegroups"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cephFilesystemSubVolumeGroups. +func (c *cephFilesystemSubVolumeGroups) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("cephfilesystemsubvolumegroups"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a cephFilesystemSubVolumeGroup and creates it. Returns the server's representation of the cephFilesystemSubVolumeGroup, and an error, if there is any. +func (c *cephFilesystemSubVolumeGroups) Create(ctx context.Context, cephFilesystemSubVolumeGroup *v1.CephFilesystemSubVolumeGroup, opts metav1.CreateOptions) (result *v1.CephFilesystemSubVolumeGroup, err error) { + result = &v1.CephFilesystemSubVolumeGroup{} + err = c.client.Post(). + Namespace(c.ns). + Resource("cephfilesystemsubvolumegroups"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephFilesystemSubVolumeGroup). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a cephFilesystemSubVolumeGroup and updates it. Returns the server's representation of the cephFilesystemSubVolumeGroup, and an error, if there is any. +func (c *cephFilesystemSubVolumeGroups) Update(ctx context.Context, cephFilesystemSubVolumeGroup *v1.CephFilesystemSubVolumeGroup, opts metav1.UpdateOptions) (result *v1.CephFilesystemSubVolumeGroup, err error) { + result = &v1.CephFilesystemSubVolumeGroup{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cephfilesystemsubvolumegroups"). + Name(cephFilesystemSubVolumeGroup.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephFilesystemSubVolumeGroup). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the cephFilesystemSubVolumeGroup and deletes it. Returns an error if one occurs. +func (c *cephFilesystemSubVolumeGroups) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cephfilesystemsubvolumegroups"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cephFilesystemSubVolumeGroups) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("cephfilesystemsubvolumegroups"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched cephFilesystemSubVolumeGroup. +func (c *cephFilesystemSubVolumeGroups) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephFilesystemSubVolumeGroup, err error) { + result = &v1.CephFilesystemSubVolumeGroup{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("cephfilesystemsubvolumegroups"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_ceph.rook.io_client.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_ceph.rook.io_client.go index f4ab980988783..ff9c8aa8cf9b2 100644 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_ceph.rook.io_client.go +++ b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_ceph.rook.io_client.go @@ -56,6 +56,10 @@ func (c *FakeCephV1) CephFilesystemMirrors(namespace string) v1.CephFilesystemMi return &FakeCephFilesystemMirrors{c, namespace} } +func (c *FakeCephV1) CephFilesystemSubVolumeGroups(namespace string) v1.CephFilesystemSubVolumeGroupInterface { + return &FakeCephFilesystemSubVolumeGroups{c, namespace} +} + func (c *FakeCephV1) CephNFSes(namespace string) v1.CephNFSInterface { return &FakeCephNFSes{c, namespace} } diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephfilesystemsubvolumegroup.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephfilesystemsubvolumegroup.go new file mode 100644 index 0000000000000..f9afe5ba18ce0 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephfilesystemsubvolumegroup.go @@ -0,0 +1,130 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + cephrookiov1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCephFilesystemSubVolumeGroups implements CephFilesystemSubVolumeGroupInterface +type FakeCephFilesystemSubVolumeGroups struct { + Fake *FakeCephV1 + ns string +} + +var cephfilesystemsubvolumegroupsResource = schema.GroupVersionResource{Group: "ceph.rook.io", Version: "v1", Resource: "cephfilesystemsubvolumegroups"} + +var cephfilesystemsubvolumegroupsKind = schema.GroupVersionKind{Group: "ceph.rook.io", Version: "v1", Kind: "CephFilesystemSubVolumeGroup"} + +// Get takes name of the cephFilesystemSubVolumeGroup, and returns the corresponding cephFilesystemSubVolumeGroup object, and an error if there is any. +func (c *FakeCephFilesystemSubVolumeGroups) Get(ctx context.Context, name string, options v1.GetOptions) (result *cephrookiov1.CephFilesystemSubVolumeGroup, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(cephfilesystemsubvolumegroupsResource, c.ns, name), &cephrookiov1.CephFilesystemSubVolumeGroup{}) + + if obj == nil { + return nil, err + } + return obj.(*cephrookiov1.CephFilesystemSubVolumeGroup), err +} + +// List takes label and field selectors, and returns the list of CephFilesystemSubVolumeGroups that match those selectors. 
+func (c *FakeCephFilesystemSubVolumeGroups) List(ctx context.Context, opts v1.ListOptions) (result *cephrookiov1.CephFilesystemSubVolumeGroupList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(cephfilesystemsubvolumegroupsResource, cephfilesystemsubvolumegroupsKind, c.ns, opts), &cephrookiov1.CephFilesystemSubVolumeGroupList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &cephrookiov1.CephFilesystemSubVolumeGroupList{ListMeta: obj.(*cephrookiov1.CephFilesystemSubVolumeGroupList).ListMeta} + for _, item := range obj.(*cephrookiov1.CephFilesystemSubVolumeGroupList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested cephFilesystemSubVolumeGroups. +func (c *FakeCephFilesystemSubVolumeGroups) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(cephfilesystemsubvolumegroupsResource, c.ns, opts)) + +} + +// Create takes the representation of a cephFilesystemSubVolumeGroup and creates it. Returns the server's representation of the cephFilesystemSubVolumeGroup, and an error, if there is any. +func (c *FakeCephFilesystemSubVolumeGroups) Create(ctx context.Context, cephFilesystemSubVolumeGroup *cephrookiov1.CephFilesystemSubVolumeGroup, opts v1.CreateOptions) (result *cephrookiov1.CephFilesystemSubVolumeGroup, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(cephfilesystemsubvolumegroupsResource, c.ns, cephFilesystemSubVolumeGroup), &cephrookiov1.CephFilesystemSubVolumeGroup{}) + + if obj == nil { + return nil, err + } + return obj.(*cephrookiov1.CephFilesystemSubVolumeGroup), err +} + +// Update takes the representation of a cephFilesystemSubVolumeGroup and updates it. Returns the server's representation of the cephFilesystemSubVolumeGroup, and an error, if there is any. +func (c *FakeCephFilesystemSubVolumeGroups) Update(ctx context.Context, cephFilesystemSubVolumeGroup *cephrookiov1.CephFilesystemSubVolumeGroup, opts v1.UpdateOptions) (result *cephrookiov1.CephFilesystemSubVolumeGroup, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(cephfilesystemsubvolumegroupsResource, c.ns, cephFilesystemSubVolumeGroup), &cephrookiov1.CephFilesystemSubVolumeGroup{}) + + if obj == nil { + return nil, err + } + return obj.(*cephrookiov1.CephFilesystemSubVolumeGroup), err +} + +// Delete takes name of the cephFilesystemSubVolumeGroup and deletes it. Returns an error if one occurs. +func (c *FakeCephFilesystemSubVolumeGroups) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(cephfilesystemsubvolumegroupsResource, c.ns, name), &cephrookiov1.CephFilesystemSubVolumeGroup{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCephFilesystemSubVolumeGroups) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(cephfilesystemsubvolumegroupsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &cephrookiov1.CephFilesystemSubVolumeGroupList{}) + return err +} + +// Patch applies the patch and returns the patched cephFilesystemSubVolumeGroup. 
+func (c *FakeCephFilesystemSubVolumeGroups) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *cephrookiov1.CephFilesystemSubVolumeGroup, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(cephfilesystemsubvolumegroupsResource, c.ns, name, pt, data, subresources...), &cephrookiov1.CephFilesystemSubVolumeGroup{}) + + if obj == nil { + return nil, err + } + return obj.(*cephrookiov1.CephFilesystemSubVolumeGroup), err +} diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/generated_expansion.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/generated_expansion.go index 57a77e4090e49..f7aea4f9e06b9 100644 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/generated_expansion.go +++ b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/generated_expansion.go @@ -32,6 +32,8 @@ type CephFilesystemExpansion interface{} type CephFilesystemMirrorExpansion interface{} +type CephFilesystemSubVolumeGroupExpansion interface{} + type CephNFSExpansion interface{} type CephObjectRealmExpansion interface{} diff --git a/pkg/client/informers/externalversions/ceph.rook.io/v1/cephfilesystemsubvolumegroup.go b/pkg/client/informers/externalversions/ceph.rook.io/v1/cephfilesystemsubvolumegroup.go new file mode 100644 index 0000000000000..0559d0693047f --- /dev/null +++ b/pkg/client/informers/externalversions/ceph.rook.io/v1/cephfilesystemsubvolumegroup.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + cephrookiov1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + versioned "github.com/rook/rook/pkg/client/clientset/versioned" + internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" + v1 "github.com/rook/rook/pkg/client/listers/ceph.rook.io/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// CephFilesystemSubVolumeGroupInformer provides access to a shared informer and lister for +// CephFilesystemSubVolumeGroups. +type CephFilesystemSubVolumeGroupInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.CephFilesystemSubVolumeGroupLister +} + +type cephFilesystemSubVolumeGroupInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewCephFilesystemSubVolumeGroupInformer constructs a new informer for CephFilesystemSubVolumeGroup type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewCephFilesystemSubVolumeGroupInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCephFilesystemSubVolumeGroupInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredCephFilesystemSubVolumeGroupInformer constructs a new informer for CephFilesystemSubVolumeGroup type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredCephFilesystemSubVolumeGroupInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CephV1().CephFilesystemSubVolumeGroups(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CephV1().CephFilesystemSubVolumeGroups(namespace).Watch(context.TODO(), options) + }, + }, + &cephrookiov1.CephFilesystemSubVolumeGroup{}, + resyncPeriod, + indexers, + ) +} + +func (f *cephFilesystemSubVolumeGroupInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCephFilesystemSubVolumeGroupInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *cephFilesystemSubVolumeGroupInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&cephrookiov1.CephFilesystemSubVolumeGroup{}, f.defaultInformer) +} + +func (f *cephFilesystemSubVolumeGroupInformer) Lister() v1.CephFilesystemSubVolumeGroupLister { + return v1.NewCephFilesystemSubVolumeGroupLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/informers/externalversions/ceph.rook.io/v1/interface.go b/pkg/client/informers/externalversions/ceph.rook.io/v1/interface.go index bbaf68dd8499d..da097ad2659d9 100644 --- a/pkg/client/informers/externalversions/ceph.rook.io/v1/interface.go +++ b/pkg/client/informers/externalversions/ceph.rook.io/v1/interface.go @@ -38,6 +38,8 @@ type Interface interface { CephFilesystems() CephFilesystemInformer // CephFilesystemMirrors returns a CephFilesystemMirrorInformer. CephFilesystemMirrors() CephFilesystemMirrorInformer + // CephFilesystemSubVolumeGroups returns a CephFilesystemSubVolumeGroupInformer. + CephFilesystemSubVolumeGroups() CephFilesystemSubVolumeGroupInformer // CephNFSes returns a CephNFSInformer. CephNFSes() CephNFSInformer // CephObjectRealms returns a CephObjectRealmInformer. @@ -100,6 +102,11 @@ func (v *version) CephFilesystemMirrors() CephFilesystemMirrorInformer { return &cephFilesystemMirrorInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } +// CephFilesystemSubVolumeGroups returns a CephFilesystemSubVolumeGroupInformer. +func (v *version) CephFilesystemSubVolumeGroups() CephFilesystemSubVolumeGroupInformer { + return &cephFilesystemSubVolumeGroupInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // CephNFSes returns a CephNFSInformer. 
func (v *version) CephNFSes() CephNFSInformer { return &cephNFSInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go index 36d712a71f026..cac94cb33f2b5 100644 --- a/pkg/client/informers/externalversions/generic.go +++ b/pkg/client/informers/externalversions/generic.go @@ -67,6 +67,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Ceph().V1().CephFilesystems().Informer()}, nil case v1.SchemeGroupVersion.WithResource("cephfilesystemmirrors"): return &genericInformer{resource: resource.GroupResource(), informer: f.Ceph().V1().CephFilesystemMirrors().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("cephfilesystemsubvolumegroups"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Ceph().V1().CephFilesystemSubVolumeGroups().Informer()}, nil case v1.SchemeGroupVersion.WithResource("cephnfses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Ceph().V1().CephNFSes().Informer()}, nil case v1.SchemeGroupVersion.WithResource("cephobjectrealms"): diff --git a/pkg/client/listers/ceph.rook.io/v1/cephfilesystemsubvolumegroup.go b/pkg/client/listers/ceph.rook.io/v1/cephfilesystemsubvolumegroup.go new file mode 100644 index 0000000000000..5397d0a9ebd44 --- /dev/null +++ b/pkg/client/listers/ceph.rook.io/v1/cephfilesystemsubvolumegroup.go @@ -0,0 +1,99 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// CephFilesystemSubVolumeGroupLister helps list CephFilesystemSubVolumeGroups. +// All objects returned here must be treated as read-only. +type CephFilesystemSubVolumeGroupLister interface { + // List lists all CephFilesystemSubVolumeGroups in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.CephFilesystemSubVolumeGroup, err error) + // CephFilesystemSubVolumeGroups returns an object that can list and get CephFilesystemSubVolumeGroups. + CephFilesystemSubVolumeGroups(namespace string) CephFilesystemSubVolumeGroupNamespaceLister + CephFilesystemSubVolumeGroupListerExpansion +} + +// cephFilesystemSubVolumeGroupLister implements the CephFilesystemSubVolumeGroupLister interface. +type cephFilesystemSubVolumeGroupLister struct { + indexer cache.Indexer +} + +// NewCephFilesystemSubVolumeGroupLister returns a new CephFilesystemSubVolumeGroupLister. +func NewCephFilesystemSubVolumeGroupLister(indexer cache.Indexer) CephFilesystemSubVolumeGroupLister { + return &cephFilesystemSubVolumeGroupLister{indexer: indexer} +} + +// List lists all CephFilesystemSubVolumeGroups in the indexer. 
+func (s *cephFilesystemSubVolumeGroupLister) List(selector labels.Selector) (ret []*v1.CephFilesystemSubVolumeGroup, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.CephFilesystemSubVolumeGroup)) + }) + return ret, err +} + +// CephFilesystemSubVolumeGroups returns an object that can list and get CephFilesystemSubVolumeGroups. +func (s *cephFilesystemSubVolumeGroupLister) CephFilesystemSubVolumeGroups(namespace string) CephFilesystemSubVolumeGroupNamespaceLister { + return cephFilesystemSubVolumeGroupNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// CephFilesystemSubVolumeGroupNamespaceLister helps list and get CephFilesystemSubVolumeGroups. +// All objects returned here must be treated as read-only. +type CephFilesystemSubVolumeGroupNamespaceLister interface { + // List lists all CephFilesystemSubVolumeGroups in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.CephFilesystemSubVolumeGroup, err error) + // Get retrieves the CephFilesystemSubVolumeGroup from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.CephFilesystemSubVolumeGroup, error) + CephFilesystemSubVolumeGroupNamespaceListerExpansion +} + +// cephFilesystemSubVolumeGroupNamespaceLister implements the CephFilesystemSubVolumeGroupNamespaceLister +// interface. +type cephFilesystemSubVolumeGroupNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all CephFilesystemSubVolumeGroups in the indexer for a given namespace. +func (s cephFilesystemSubVolumeGroupNamespaceLister) List(selector labels.Selector) (ret []*v1.CephFilesystemSubVolumeGroup, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.CephFilesystemSubVolumeGroup)) + }) + return ret, err +} + +// Get retrieves the CephFilesystemSubVolumeGroup from the indexer for a given namespace and name. +func (s cephFilesystemSubVolumeGroupNamespaceLister) Get(name string) (*v1.CephFilesystemSubVolumeGroup, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("cephfilesystemsubvolumegroup"), name) + } + return obj.(*v1.CephFilesystemSubVolumeGroup), nil +} diff --git a/pkg/client/listers/ceph.rook.io/v1/expansion_generated.go b/pkg/client/listers/ceph.rook.io/v1/expansion_generated.go index 032f13431e13c..b31641e019b50 100644 --- a/pkg/client/listers/ceph.rook.io/v1/expansion_generated.go +++ b/pkg/client/listers/ceph.rook.io/v1/expansion_generated.go @@ -74,6 +74,14 @@ type CephFilesystemMirrorListerExpansion interface{} // CephFilesystemMirrorNamespaceLister. type CephFilesystemMirrorNamespaceListerExpansion interface{} +// CephFilesystemSubVolumeGroupListerExpansion allows custom methods to be added to +// CephFilesystemSubVolumeGroupLister. +type CephFilesystemSubVolumeGroupListerExpansion interface{} + +// CephFilesystemSubVolumeGroupNamespaceListerExpansion allows custom methods to be added to +// CephFilesystemSubVolumeGroupNamespaceLister. +type CephFilesystemSubVolumeGroupNamespaceListerExpansion interface{} + // CephNFSListerExpansion allows custom methods to be added to // CephNFSLister. 
type CephNFSListerExpansion interface{}
diff --git a/pkg/daemon/ceph/client/subvolumegroup.go b/pkg/daemon/ceph/client/subvolumegroup.go
new file mode 100644
index 0000000000000..a73a4863e645a
--- /dev/null
+++ b/pkg/daemon/ceph/client/subvolumegroup.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2021 The Rook Authors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+ "github.com/pkg/errors"
+ "github.com/rook/rook/pkg/clusterd"
+)
+
+// CreateCephFSSubVolumeGroup creates a CephFS subvolume group.
+// volName is the name of the Ceph FS volume, the same as the CephFilesystem CR name.
+func CreateCephFSSubVolumeGroup(context *clusterd.Context, clusterInfo *ClusterInfo, volName, groupName string) error {
+ logger.Infof("creating cephfs subvolume group %q in filesystem %q", groupName, volName)
+ // [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]
+ args := []string{"fs", "subvolumegroup", "create", volName, groupName}
+ cmd := NewCephCommand(context, clusterInfo, args)
+ cmd.JsonOutput = false
+ output, err := cmd.Run()
+ if err != nil {
+ return errors.Wrapf(err, "failed to create subvolume group %q in filesystem %q. %s", groupName, volName, output)
+ }
+
+ logger.Infof("successfully created cephfs subvolume group %q in filesystem %q", groupName, volName)
+ return nil
+}
+
+// DeleteCephFSSubVolumeGroup deletes a CephFS subvolume group.
+func DeleteCephFSSubVolumeGroup(context *clusterd.Context, clusterInfo *ClusterInfo, volName, groupName string) error {
+ logger.Infof("deleting cephfs subvolume group %q in filesystem %q", groupName, volName)
+ // --force?
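+ // The underlying CLI is `ceph fs subvolumegroup rm <vol_name> <group_name> [--force]`; with
+ // --force, removing a group that no longer exists would succeed silently. Presumably the flag is
+ // omitted here so that a failure surfaces to the caller (see the error handling below).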
+ args := []string{"fs", "subvolumegroup", "rm", volName, groupName}
+ cmd := NewCephCommand(context, clusterInfo, args)
+ cmd.JsonOutput = false
+ _, err := cmd.Run()
+ if err != nil {
+ // Intentionally don't wrap the error so the caller can inspect the return code
+ return err
+ }
+
+ logger.Infof("successfully deleted cephfs subvolume group %q in filesystem %q", groupName, volName)
+ return nil
+}
diff --git a/pkg/operator/ceph/cluster/dependents.go b/pkg/operator/ceph/cluster/dependents.go
index a6c9d4503e983..733c68f577f46 100644
--- a/pkg/operator/ceph/cluster/dependents.go
+++ b/pkg/operator/ceph/cluster/dependents.go
@@ -46,6 +46,7 @@ var (
 "CephClientList",
 "CephBucketTopic",
 "CephBucketNotification",
+ "CephFilesystemSubVolumeGroup",
 }
 )
diff --git a/pkg/operator/ceph/controller/predicate.go b/pkg/operator/ceph/controller/predicate.go
index cefeb953b31ad..72cbf54b4e28a 100644
--- a/pkg/operator/ceph/controller/predicate.go
+++ b/pkg/operator/ceph/controller/predicate.go
@@ -366,6 +366,31 @@ func WatchControllerPredicate() predicate.Funcs {
 return true
 }
 
+ case *cephv1.CephFilesystemSubVolumeGroup:
+ objNew := e.ObjectNew.(*cephv1.CephFilesystemSubVolumeGroup)
+ logger.Debug("update event on CephFilesystemSubVolumeGroup CR")
+ // If the label "do_not_reconcile" is set on the object, let's not reconcile that request
+ isDoNotReconcile := IsDoNotReconcile(objNew.GetLabels())
+ if isDoNotReconcile {
+ logger.Debugf("object %q matched on update but %q label is set, doing nothing", objNew.Name, DoNotReconcileLabelName)
+ return false
+ }
+ diff := cmp.Diff(objOld.Spec, objNew.Spec, resourceQtyComparer)
+ if diff != "" {
+ logger.Infof("CR has changed for %q. diff=%s", objNew.Name, diff)
+ return true
+ } else if objectToBeDeleted(objOld, objNew) {
+ logger.Debugf("CR %q is going to be deleted", objNew.Name)
+ return true
+ } else if objOld.GetGeneration() != objNew.GetGeneration() {
+ logger.Debugf("skipping resource %q update with unchanged spec", objNew.Name)
+ }
+ // Handling upgrades
+ isUpgrade := isUpgrade(objOld.GetLabels(), objNew.GetLabels())
+ if isUpgrade {
+ return true
+ }
+
 case *bktv1alpha1.ObjectBucketClaim:
 objNew := e.ObjectNew.(*bktv1alpha1.ObjectBucketClaim)
 logger.Debug("update event on ObjectBucketClaim CR")
diff --git a/pkg/operator/ceph/cr_manager.go b/pkg/operator/ceph/cr_manager.go
index c2dce22a1ef24..207b901d1f1aa 100644
--- a/pkg/operator/ceph/cr_manager.go
+++ b/pkg/operator/ceph/cr_manager.go
@@ -33,6 +33,7 @@ import (
 "github.com/rook/rook/pkg/operator/ceph/disruption/machinelabel"
 "github.com/rook/rook/pkg/operator/ceph/file"
 "github.com/rook/rook/pkg/operator/ceph/file/mirror"
+ "github.com/rook/rook/pkg/operator/ceph/file/subvolumegroup"
 "github.com/rook/rook/pkg/operator/ceph/nfs"
 "github.com/rook/rook/pkg/operator/ceph/object"
 "github.com/rook/rook/pkg/operator/ceph/object/bucket"
@@ -106,6 +107,7 @@ var AddToManagerFuncs = []func(manager.Manager, *clusterd.Context, context.Conte
 bucket.Add,
 topic.Add,
 notification.Add,
+ subvolumegroup.Add,
 }
 
 // AddToManagerOpFunc is a list of functions to add all Controllers to the Manager (entrypoint for
diff --git a/pkg/operator/ceph/file/controller.go b/pkg/operator/ceph/file/controller.go
index f3c490c73dd62..afc0ec2536d21 100644
--- a/pkg/operator/ceph/file/controller.go
+++ b/pkg/operator/ceph/file/controller.go
@@ -32,6 +32,7 @@ import (
 "github.com/rook/rook/pkg/operator/ceph/config"
 opcontroller "github.com/rook/rook/pkg/operator/ceph/controller"
 "github.com/rook/rook/pkg/operator/ceph/file/mirror"
+ "github.com/rook/rook/pkg/operator/ceph/reporting"
"github.com/rook/rook/pkg/operator/ceph/reporting" "github.com/rook/rook/pkg/operator/k8sutil" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -39,6 +40,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" @@ -69,9 +71,13 @@ var controllerTypeMeta = metav1.TypeMeta{ var currentAndDesiredCephVersion = opcontroller.CurrentAndDesiredCephVersion +// allow this to be overridden for unit tests +var cephFilesystemDependents = CephFilesystemDependents + // ReconcileCephFilesystem reconciles a CephFilesystem object type ReconcileCephFilesystem struct { client client.Client + recorder record.EventRecorder scheme *runtime.Scheme context *clusterd.Context cephClusterSpec *cephv1.ClusterSpec @@ -97,6 +103,7 @@ func Add(mgr manager.Manager, context *clusterd.Context, opManagerContext contex func newReconciler(mgr manager.Manager, context *clusterd.Context, opManagerContext context.Context, opConfig opcontroller.OperatorConfig) reconcile.Reconciler { return &ReconcileCephFilesystem{ client: mgr.GetClient(), + recorder: mgr.GetEventRecorderFor("rook-" + controllerName), scheme: mgr.GetScheme(), context: context, fsContexts: make(map[string]*fsHealth), @@ -236,6 +243,16 @@ func (r *ReconcileCephFilesystem) reconcile(request reconcile.Request) (reconcil // DELETE: the CR was deleted if !cephFilesystem.GetDeletionTimestamp().IsZero() { + deps, err := cephFilesystemDependents(r.context, r.clusterInfo, cephFilesystem) + if err != nil { + return reconcile.Result{}, err + } + if !deps.Empty() { + err := reporting.ReportDeletionBlockedDueToDependents(logger, r.client, cephFilesystem, deps) + return opcontroller.WaitForRequeueIfFinalizerBlocked, err + } + reporting.ReportDeletionNotBlockedDueToDependents(logger, r.client, r.recorder, cephFilesystem) + runningCephVersion, err := cephclient.LeastUptodateDaemonVersion(r.context, clusterInfo, config.MonType) if err != nil { return reconcile.Result{}, errors.Wrapf(err, "failed to retrieve current ceph %q version", config.MonType) @@ -359,7 +376,8 @@ func (r *ReconcileCephFilesystem) reconcile(request reconcile.Request) (reconcil } } if !statusUpdated { - // Set Ready status, we are done reconciling + // Set Ready status, we are done reconciling$ + // TODO: set status to Ready **only** if the filesystem is ready r.updateStatus(r.client, request.NamespacedName, cephv1.ConditionReady, nil) } diff --git a/pkg/operator/ceph/file/dependent.go b/pkg/operator/ceph/file/dependent.go new file mode 100644 index 0000000000000..54439fefbe2bb --- /dev/null +++ b/pkg/operator/ceph/file/dependent.go @@ -0,0 +1,51 @@ +/* +Copyright 2021 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package file
+
+import (
+	"fmt"
+
+	"github.com/pkg/errors"
+	v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
+	"github.com/rook/rook/pkg/clusterd"
+	"github.com/rook/rook/pkg/daemon/ceph/client"
+	"github.com/rook/rook/pkg/util/dependents"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// CephFilesystemDependents returns the subvolume group(s) that exist in the Ceph filesystem and should block
+// deletion.
+func CephFilesystemDependents(clusterdCtx *clusterd.Context, clusterInfo *client.ClusterInfo, filesystem *v1.CephFilesystem) (*dependents.DependentList, error) {
+	nsName := fmt.Sprintf("%s/%s", filesystem.Namespace, filesystem.Name)
+	baseErrMsg := fmt.Sprintf("failed to get dependents of CephFilesystem %q", nsName)
+
+	deps := dependents.NewDependentList()
+
+	// CephFilesystemSubVolumeGroups
+	subVolumeGroups, err := clusterdCtx.RookClientset.CephV1().CephFilesystemSubVolumeGroups(filesystem.Namespace).List(clusterInfo.Context, metav1.ListOptions{})
+	if err != nil {
+		return deps, errors.Wrapf(err, "%s. failed to list CephFilesystemSubVolumeGroups for CephFilesystem %q", baseErrMsg, nsName)
+	}
+	for _, subVolumeGroup := range subVolumeGroups.Items {
+		if subVolumeGroup.Spec.FilesystemName == filesystem.Name {
+			deps.Add("CephFilesystemSubVolumeGroups", subVolumeGroup.Name)
+			logger.Debugf("found CephFilesystemSubVolumeGroup %q that depends on CephFilesystem %q", subVolumeGroup.Name, nsName)
+		} else {
+			logger.Debugf("found CephFilesystemSubVolumeGroup %q that does not depend on CephFilesystem %q", subVolumeGroup.Name, nsName)
+		}
+	}
+
+	return deps, nil
+}
diff --git a/pkg/operator/ceph/file/dependent_test.go b/pkg/operator/ceph/file/dependent_test.go
new file mode 100644
index 0000000000000..07b4e5d573697
--- /dev/null
+++ b/pkg/operator/ceph/file/dependent_test.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2021 The Rook Authors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package file
+
+import (
+	"context"
+	"testing"
+
+	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
+	rookclient "github.com/rook/rook/pkg/client/clientset/versioned/fake"
+	"github.com/rook/rook/pkg/clusterd"
+	"github.com/rook/rook/pkg/daemon/ceph/client"
+	"github.com/stretchr/testify/assert"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+func TestCephFilesystemDependents(t *testing.T) {
+	ctx := context.TODO()
+	scheme := runtime.NewScheme()
+	assert.NoError(t, cephv1.AddToScheme(scheme))
+	ns := "test-ceph-filesystem-dependents"
+	var c *clusterd.Context
+
+	newClusterdCtx := func() *clusterd.Context {
+		return &clusterd.Context{
+			RookClientset: rookclient.NewSimpleClientset(),
+		}
+	}
+
+	clusterInfo := client.AdminTestClusterInfo(ns)
+	// Create an ObjectMeta with the given name in our test namespace
+	meta := func(name string) v1.ObjectMeta {
+		return v1.ObjectMeta{
+			Name:      name,
+			Namespace: ns,
+		}
+	}
+
+	fs := &cephv1.CephFilesystem{
+		ObjectMeta: v1.ObjectMeta{
+			Name:      "myfs",
+			Namespace: ns,
+		},
+	}
+
+	t.Run("no subvolumegroups", func(t *testing.T) {
+		c = newClusterdCtx()
+		deps, err := CephFilesystemDependents(c, clusterInfo, fs)
+		assert.NoError(t, err)
+		assert.True(t, deps.Empty())
+	})
+
+	t.Run("one subvolumegroup but for a different filesystem", func(t *testing.T) {
+		otherFs := &cephv1.CephFilesystem{
+			ObjectMeta: v1.ObjectMeta{
+				Name:      "otherfs",
+				Namespace: ns,
+			},
+		}
+
+		c = newClusterdCtx()
+		_, err := c.RookClientset.CephV1().CephFilesystemSubVolumeGroups(clusterInfo.Namespace).Create(ctx, &cephv1.CephFilesystemSubVolumeGroup{ObjectMeta: meta("subvolgroup1"), Spec: cephv1.CephFilesystemSubVolumeGroupSpec{FilesystemName: "myfs"}}, v1.CreateOptions{})
+		assert.NoError(t, err)
+		deps, err := CephFilesystemDependents(c, clusterInfo, otherFs)
+		assert.NoError(t, err)
+		assert.True(t, deps.Empty())
+	})
+
+	t.Run("one matching subvolumegroup", func(t *testing.T) {
+		c = newClusterdCtx()
+		_, err := c.RookClientset.CephV1().CephFilesystemSubVolumeGroups(clusterInfo.Namespace).Create(ctx, &cephv1.CephFilesystemSubVolumeGroup{ObjectMeta: meta("subvolgroup1"), Spec: cephv1.CephFilesystemSubVolumeGroupSpec{FilesystemName: "myfs"}}, v1.CreateOptions{})
+		assert.NoError(t, err)
+		deps, err := CephFilesystemDependents(c, clusterInfo, fs)
+		assert.NoError(t, err)
+		assert.False(t, deps.Empty())
+	})
+}
diff --git a/pkg/operator/ceph/file/subvolumegroup/controller.go b/pkg/operator/ceph/file/subvolumegroup/controller.go
new file mode 100644
index 0000000000000..c65434723cf8e
--- /dev/null
+++ b/pkg/operator/ceph/file/subvolumegroup/controller.go
@@ -0,0 +1,280 @@
+/*
+Copyright 2021 The Rook Authors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package subvolumegroup to manage CephFS subvolume groups
+package subvolumegroup
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/pkg/errors"
+	cephclient "github.com/rook/rook/pkg/daemon/ceph/client"
+	"github.com/rook/rook/pkg/util/exec"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+	"sigs.k8s.io/controller-runtime/pkg/source"
+
+	"github.com/coreos/pkg/capnslog"
+	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
+	"github.com/rook/rook/pkg/clusterd"
+	"github.com/rook/rook/pkg/operator/ceph/cluster/mon"
+	opcontroller "github.com/rook/rook/pkg/operator/ceph/controller"
+	"github.com/rook/rook/pkg/operator/ceph/reporting"
+	kerrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+const (
+	controllerName = "ceph-fs-subvolumegroup-controller"
+)
+
+var logger = capnslog.NewPackageLogger("github.com/rook/rook", controllerName)
+
+var cephFilesystemSubVolumeGroup = reflect.TypeOf(cephv1.CephFilesystemSubVolumeGroup{}).Name()
+
+// Sets the type meta for the controller main object
+var controllerTypeMeta = metav1.TypeMeta{
+	Kind:       cephFilesystemSubVolumeGroup,
+	APIVersion: fmt.Sprintf("%s/%s", cephv1.CustomResourceGroup, cephv1.Version),
+}
+
+// ReconcileCephFilesystemSubVolumeGroup reconciles a CephFilesystemSubVolumeGroup object
+type ReconcileCephFilesystemSubVolumeGroup struct {
+	client           client.Client
+	scheme           *runtime.Scheme
+	context          *clusterd.Context
+	clusterInfo      *cephclient.ClusterInfo
+	opManagerContext context.Context
+}
+
+// Add creates a new CephFilesystemSubVolumeGroup Controller and adds it to the Manager. The Manager will set fields on the Controller
+// and Start it when the Manager is Started.
+func Add(mgr manager.Manager, context *clusterd.Context, opManagerContext context.Context, opConfig opcontroller.OperatorConfig) error {
+	return add(mgr, newReconciler(mgr, context, opManagerContext))
+}
+
+// newReconciler returns a new reconcile.Reconciler
+func newReconciler(mgr manager.Manager, context *clusterd.Context, opManagerContext context.Context) reconcile.Reconciler {
+	return &ReconcileCephFilesystemSubVolumeGroup{
+		client:           mgr.GetClient(),
+		scheme:           mgr.GetScheme(),
+		context:          context,
+		opManagerContext: opManagerContext,
+	}
+}
+
+func add(mgr manager.Manager, r reconcile.Reconciler) error {
+	// Create a new controller
+	c, err := controller.New(controllerName, mgr, controller.Options{Reconciler: r})
+	if err != nil {
+		return err
+	}
+	logger.Info("successfully started")
+
+	// Watch for changes on the CephFilesystemSubVolumeGroup CRD object
+	err = c.Watch(&source.Kind{Type: &cephv1.CephFilesystemSubVolumeGroup{TypeMeta: controllerTypeMeta}}, &handler.EnqueueRequestForObject{}, opcontroller.WatchControllerPredicate())
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Reconcile reads the state of the cluster for a CephFilesystemSubVolumeGroup object and makes changes based on the state read
+// and what is in the CephFilesystemSubVolumeGroup.Spec.
+// The Controller will requeue the Request to be processed again if the returned error is non-nil or
+// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
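+// The overall flow is: fetch the CR, ensure a finalizer is set, wait for the CephCluster
+// to be ready, handle deletion (remove the Ceph subvolume group, then the finalizer),
+// verify that the referenced CephFilesystem exists and is Ready, then create or update
+// the subvolume group and report the result in the CR status.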
+func (r *ReconcileCephFilesystemSubVolumeGroup) Reconcile(context context.Context, request reconcile.Request) (reconcile.Result, error) {
+	// workaround because the rook logging mechanism is not compatible with the controller-runtime logging interface
+	reconcileResponse, err := r.reconcile(request)
+	if err != nil {
+		logger.Errorf("failed to reconcile. %v", err)
+	}
+
+	return reconcileResponse, err
+}
+
+func (r *ReconcileCephFilesystemSubVolumeGroup) reconcile(request reconcile.Request) (reconcile.Result, error) {
+	// Fetch the CephFilesystemSubVolumeGroup instance
+	cephFilesystemSubVolumeGroup := &cephv1.CephFilesystemSubVolumeGroup{}
+	err := r.client.Get(r.opManagerContext, request.NamespacedName, cephFilesystemSubVolumeGroup)
+	if err != nil {
+		if kerrors.IsNotFound(err) {
+			logger.Debug("cephFilesystemSubVolumeGroup resource not found. Ignoring since object must be deleted.")
+			return reconcile.Result{}, nil
+		}
+		// Error reading the object - requeue the request.
+		return reconcile.Result{}, errors.Wrap(err, "failed to get cephFilesystemSubVolumeGroup")
+	}
+
+	// Set a finalizer so we can do cleanup before the object goes away
+	err = opcontroller.AddFinalizerIfNotPresent(r.opManagerContext, r.client, cephFilesystemSubVolumeGroup)
+	if err != nil {
+		return reconcile.Result{}, errors.Wrap(err, "failed to add finalizer")
+	}
+
+	// The CR was just created, initialize the status fields
+	if cephFilesystemSubVolumeGroup.Status == nil {
+		r.updateStatus(r.client, request.NamespacedName, cephv1.ConditionProgressing)
+	}
+
+	// Make sure a CephCluster is present, otherwise do nothing
+	_, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.opManagerContext, r.client, request.NamespacedName, controllerName)
+	if !isReadyToReconcile {
+		// This handles the case where the Ceph Cluster is gone and we want to delete the CR
+		// We skip the deleteSubVolumeGroup() function since everything is gone already
+		//
+		// Also, only remove the finalizer if the CephCluster is gone
+		// If not, we should wait for it to be ready
+		// This handles the case where the operator is not ready to accept Ceph commands but the cluster exists
+		if !cephFilesystemSubVolumeGroup.GetDeletionTimestamp().IsZero() && !cephClusterExists {
+			// Remove finalizer
+			err = opcontroller.RemoveFinalizer(r.opManagerContext, r.client, cephFilesystemSubVolumeGroup)
+			if err != nil {
+				return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to remove finalizer")
+			}
+
+			// Return and do not requeue. Successful deletion.
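+			// With the finalizer gone, Kubernetes is free to garbage collect the CR;
+			// no Ceph-side cleanup is possible since the cluster no longer exists.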
+			return reconcile.Result{}, nil
+		}
+		return reconcileResponse, nil
+	}
+
+	// Populate clusterInfo during each reconcile
+	r.clusterInfo, _, _, err = mon.LoadClusterInfo(r.context, r.opManagerContext, request.NamespacedName.Namespace)
+	if err != nil {
+		return reconcile.Result{}, errors.Wrap(err, "failed to populate cluster info")
+	}
+	r.clusterInfo.Context = r.opManagerContext
+
+	// DELETE: the CR was deleted
+	if !cephFilesystemSubVolumeGroup.GetDeletionTimestamp().IsZero() {
+		logger.Debugf("deleting subvolume group %q", cephFilesystemSubVolumeGroup.Name)
+		err := r.deleteSubVolumeGroup(cephFilesystemSubVolumeGroup)
+		if err != nil {
+			return reconcile.Result{}, errors.Wrapf(err, "failed to delete ceph filesystem subvolume group %q", cephFilesystemSubVolumeGroup.Name)
+		}
+
+		// Remove finalizer
+		err = opcontroller.RemoveFinalizer(r.opManagerContext, r.client, cephFilesystemSubVolumeGroup)
+		if err != nil {
+			return reconcile.Result{}, errors.Wrap(err, "failed to remove finalizer")
+		}
+
+		// Return and do not requeue. Successful deletion.
+		return reconcile.Result{}, nil
+	}
+
+	// Build the NamespacedName to fetch the Filesystem and make sure it exists, if not we cannot
+	// create the subvolumegroup
+	cephFilesystem := &cephv1.CephFilesystem{}
+	cephFilesystemNamespacedName := types.NamespacedName{Name: cephFilesystemSubVolumeGroup.Spec.FilesystemName, Namespace: request.Namespace}
+	err = r.client.Get(r.opManagerContext, cephFilesystemNamespacedName, cephFilesystem)
+	if err != nil {
+		if kerrors.IsNotFound(err) {
+			return reconcile.Result{}, errors.Wrapf(err, "failed to fetch ceph filesystem %q, cannot create subvolumegroup %q", cephFilesystemSubVolumeGroup.Spec.FilesystemName, cephFilesystemSubVolumeGroup.Name)
+		}
+		// Error reading the object - requeue the request.
+		return reconcile.Result{}, errors.Wrap(err, "failed to get cephFilesystem")
+	}
+
+	// If the CephFilesystem is not ready to accept commands, we should wait for it to be ready
+	if cephFilesystem.Status == nil || cephFilesystem.Status.Phase != cephv1.ConditionReady {
+		// We know the CR is present, so it should only be a matter of seconds for it to become ready
+		logger.Infof("ceph filesystem %q is not ready yet, cannot create subvolumegroup %q, requeuing", cephFilesystemSubVolumeGroup.Spec.FilesystemName, cephFilesystemSubVolumeGroup.Name)
+		return reconcile.Result{Requeue: true, RequeueAfter: 10 * time.Second}, nil
+	}
+
+	// Create or Update ceph filesystem subvolume group
+	err = r.createOrUpdateSubVolumeGroup(cephFilesystemSubVolumeGroup)
+	if err != nil {
+		if strings.Contains(err.Error(), opcontroller.UninitializedCephConfigError) {
+			logger.Info(opcontroller.OperatorNotInitializedMessage)
+			return opcontroller.WaitForRequeueIfOperatorNotInitialized, nil
+		}
+		r.updateStatus(r.client, request.NamespacedName, cephv1.ConditionFailure)
+		return reconcile.Result{}, errors.Wrapf(err, "failed to create or update ceph filesystem subvolume group %q", cephFilesystemSubVolumeGroup.Name)
+	}
+
+	// Success! Let's update the status
+	r.updateStatus(r.client, request.NamespacedName, cephv1.ConditionReady)
+
+	// Return and do not requeue
+	logger.Debug("done reconciling")
+	return reconcile.Result{}, nil
+}
+
+// createOrUpdateSubVolumeGroup creates the ceph filesystem subvolume group
+func (r *ReconcileCephFilesystemSubVolumeGroup) createOrUpdateSubVolumeGroup(cephFilesystemSubVolumeGroup *cephv1.CephFilesystemSubVolumeGroup) error {
+	logger.Infof("creating ceph filesystem subvolume group %q in namespace %q", cephFilesystemSubVolumeGroup.Name, cephFilesystemSubVolumeGroup.Namespace)
+
+	err := cephclient.CreateCephFSSubVolumeGroup(r.context, r.clusterInfo, cephFilesystemSubVolumeGroup.Spec.FilesystemName, cephFilesystemSubVolumeGroup.Name)
+	if err != nil {
+		return errors.Wrapf(err, "failed to create ceph filesystem subvolume group %q", cephFilesystemSubVolumeGroup.Name)
+	}
+
+	return nil
+}
+
+// deleteSubVolumeGroup deletes the ceph filesystem subvolume group
+func (r *ReconcileCephFilesystemSubVolumeGroup) deleteSubVolumeGroup(cephFilesystemSubVolumeGroup *cephv1.CephFilesystemSubVolumeGroup) error {
+	logger.Infof("deleting ceph filesystem subvolume group object %q", cephFilesystemSubVolumeGroup.Name)
+	if err := cephclient.DeleteCephFSSubVolumeGroup(r.context, r.clusterInfo, cephFilesystemSubVolumeGroup.Spec.FilesystemName, cephFilesystemSubVolumeGroup.Name); err != nil {
+		code, ok := exec.ExitStatus(err)
+		// If the subvolume group still contains subvolumes the command will fail with:
+		// Error ENOTEMPTY: error in rmdir /volumes/csi
+		if ok && code == int(syscall.ENOTEMPTY) {
+			return errors.Wrapf(err, "failed to delete ceph filesystem subvolume group %q, remove the subvolumes first", cephFilesystemSubVolumeGroup.Name)
+		}
+
+		return errors.Wrapf(err, "failed to delete ceph filesystem subvolume group %q", cephFilesystemSubVolumeGroup.Name)
+	}
+
+	logger.Infof("deleted ceph filesystem subvolume group %q", cephFilesystemSubVolumeGroup.Name)
+	return nil
+}
+
+// updateStatus updates an object with a given status
+func (r *ReconcileCephFilesystemSubVolumeGroup) updateStatus(client client.Client, name types.NamespacedName, status cephv1.ConditionType) {
+	cephFilesystemSubVolumeGroup := &cephv1.CephFilesystemSubVolumeGroup{}
+	if err := client.Get(r.opManagerContext, name, cephFilesystemSubVolumeGroup); err != nil {
+		if kerrors.IsNotFound(err) {
+			logger.Debug("CephFilesystemSubVolumeGroup resource not found. Ignoring since object must be deleted.")
+			return
+		}
+		logger.Warningf("failed to retrieve ceph filesystem subvolume group %q to update status to %q. %v", name, status, err)
+		return
+	}
+	if cephFilesystemSubVolumeGroup.Status == nil {
+		cephFilesystemSubVolumeGroup.Status = &cephv1.CephFilesystemSubVolumeGroupStatus{}
+	}
+
+	cephFilesystemSubVolumeGroup.Status.Phase = status
+	if err := reporting.UpdateStatus(client, cephFilesystemSubVolumeGroup); err != nil {
+		logger.Errorf("failed to set ceph filesystem subvolume group %q status to %q. %v", name, status, err)
+		return
+	}
+	logger.Debugf("ceph filesystem subvolume group %q status updated to %q", name, status)
+}
diff --git a/pkg/operator/ceph/file/subvolumegroup/controller_test.go b/pkg/operator/ceph/file/subvolumegroup/controller_test.go
new file mode 100644
index 0000000000000..6fffbdea56612
--- /dev/null
+++ b/pkg/operator/ceph/file/subvolumegroup/controller_test.go
@@ -0,0 +1,220 @@
+/*
+Copyright 2021 The Rook Authors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package subvolumegroup
+
+import (
+	"context"
+	"os"
+	"testing"
+
+	"github.com/coreos/pkg/capnslog"
+	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
+	rookclient "github.com/rook/rook/pkg/client/clientset/versioned/fake"
+	"github.com/rook/rook/pkg/client/clientset/versioned/scheme"
+	"github.com/rook/rook/pkg/clusterd"
+	"github.com/rook/rook/pkg/operator/k8sutil"
+	testop "github.com/rook/rook/pkg/operator/test"
+	exectest "github.com/rook/rook/pkg/util/exec/test"
+	"github.com/stretchr/testify/assert"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+func TestCephFilesystemSubVolumeGroupController(t *testing.T) {
+	ctx := context.TODO()
+	// Set DEBUG logging
+	capnslog.SetGlobalLogLevel(capnslog.DEBUG)
+	os.Setenv("ROOK_LOG_LEVEL", "DEBUG")
+
+	var (
+		name      = "group-a"
+		namespace = "rook-ceph"
+	)
+
+	// A cephFilesystemSubVolumeGroup resource with metadata and spec.
+	cephFilesystemSubVolumeGroup := &cephv1.CephFilesystemSubVolumeGroup{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+			UID:       types.UID("c47cac40-9bee-4d52-823b-ccd803ba5bfe"),
+		},
+		Spec: cephv1.CephFilesystemSubVolumeGroupSpec{
+			FilesystemName: namespace,
+		},
+		Status: &cephv1.CephFilesystemSubVolumeGroupStatus{
+			Phase: "",
+		},
+	}
+
+	// Objects to track in the fake client.
+	object := []runtime.Object{
+		cephFilesystemSubVolumeGroup,
+	}
+
+	executor := &exectest.MockExecutor{
+		MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) {
+			if args[0] == "status" {
+				return `{"fsid":"c47cac40-9bee-4d52-823b-ccd803ba5bfe","health":{"checks":{},"status":"HEALTH_ERR"},"pgmap":{"num_pgs":100,"pgs_by_state":[{"state_name":"active+clean","count":100}]}}`, nil
+			}
+
+			return "", nil
+		},
+	}
+	c := &clusterd.Context{
+		Executor:      executor,
+		Clientset:     testop.New(t, 1),
+		RookClientset: rookclient.NewSimpleClientset(),
+	}
+
+	// Register operator types with the runtime scheme.
+	s := scheme.Scheme
+	s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephFilesystemSubVolumeGroup{}, &cephv1.CephFilesystemSubVolumeGroupList{})
+
+	// Create a fake client to mock API calls.
+	cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build()
+
+	// Create a ReconcileCephFilesystemSubVolumeGroup object with the scheme and fake client.
+	r := &ReconcileCephFilesystemSubVolumeGroup{
+		client:           cl,
+		scheme:           s,
+		context:          c,
+		opManagerContext: ctx,
+	}
+
+	// Mock request to simulate Reconcile() being called on an event for a
+	// watched resource.
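+	// The request targets the CephFilesystemSubVolumeGroup created above, so every
+	// subtest below drives Reconcile() against that same object.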
+	req := reconcile.Request{
+		NamespacedName: types.NamespacedName{
+			Name:      name,
+			Namespace: namespace,
+		},
+	}
+
+	cephCluster := &cephv1.CephCluster{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      namespace,
+			Namespace: namespace,
+		},
+		Status: cephv1.ClusterStatus{
+			Phase: "",
+			CephVersion: &cephv1.ClusterVersion{
+				Version: "14.2.9-0",
+			},
+			CephStatus: &cephv1.CephStatus{
+				Health: "",
+			},
+		},
+	}
+	s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephCluster{}, &cephv1.CephClusterList{})
+
+	cephFilesystem := &cephv1.CephFilesystem{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      namespace,
+			Namespace: namespace,
+		},
+		Status: &cephv1.CephFilesystemStatus{
+			Phase: "",
+		},
+	}
+
+	t.Run("error - no ceph cluster", func(t *testing.T) {
+		res, err := r.Reconcile(ctx, req)
+		assert.NoError(t, err)
+		assert.True(t, res.Requeue)
+	})
+
+	t.Run("error - ceph cluster not ready", func(t *testing.T) {
+		object = append(object, cephCluster)
+		// Create a fake client to mock API calls.
+		cl = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build()
+		// Create a ReconcileCephFilesystemSubVolumeGroup object with the scheme and fake client.
+		r = &ReconcileCephFilesystemSubVolumeGroup{client: cl, scheme: s, context: c, opManagerContext: context.TODO()}
+		res, err := r.Reconcile(ctx, req)
+		assert.NoError(t, err)
+		assert.True(t, res.Requeue)
+
+		cephCluster.Status.Phase = cephv1.ConditionReady
+		cephCluster.Status.CephStatus.Health = "HEALTH_OK"
+	})
+
+	t.Run("error - ceph filesystem not ready", func(t *testing.T) {
+		res, err := r.Reconcile(ctx, req)
+		assert.NoError(t, err)
+		assert.True(t, res.Requeue)
+		cephFilesystem.Status.Phase = cephv1.ConditionReady
+	})
+
+	t.Run("success - ceph cluster ready, mds are running and subvolumegroup created", func(t *testing.T) {
+		// Mock clusterInfo
+		secrets := map[string][]byte{
+			"fsid":         []byte(name),
+			"mon-secret":   []byte("monsecret"),
+			"admin-secret": []byte("adminsecret"),
+		}
+		secret := &v1.Secret{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "rook-ceph-mon",
+				Namespace: namespace,
+			},
+			Data: secrets,
+			Type: k8sutil.RookType,
+		}
+		_, err := c.Clientset.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{})
+		assert.NoError(t, err)
+		objects := []runtime.Object{
+			cephFilesystemSubVolumeGroup,
+			cephCluster,
+			cephFilesystem,
+		}
+		// Create a fake client to mock API calls.
+		cl = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(objects...).Build()
+		c.Client = cl
+
+		executor = &exectest.MockExecutor{
+			MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) {
+				if args[0] == "fs" && args[1] == "subvolumegroup" && args[2] == "create" {
+					return "", nil
+				}
+
+				return "", nil
+			},
+		}
+		c.Executor = executor
+
+		s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephFilesystemList{})
+		// Create a ReconcileCephFilesystemSubVolumeGroup object with the scheme and fake client.
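+		// This reconciler now sees a ready CephCluster, a ready CephFilesystem and the
+		// mon secret, so the reconcile is expected to succeed and mark the CR Ready.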
+ r = &ReconcileCephFilesystemSubVolumeGroup{ + client: cl, + scheme: s, + context: c, + opManagerContext: context.TODO(), + } + + res, err := r.Reconcile(ctx, req) + assert.NoError(t, err) + assert.False(t, res.Requeue) + + err = r.client.Get(context.TODO(), req.NamespacedName, cephFilesystemSubVolumeGroup) + assert.NoError(t, err) + assert.Equal(t, cephv1.ConditionReady, cephFilesystemSubVolumeGroup.Status.Phase) + }) +} diff --git a/tests/scripts/github-action-helper.sh b/tests/scripts/github-action-helper.sh index 25e8ede57fabc..0bf5c9cb08aac 100755 --- a/tests/scripts/github-action-helper.sh +++ b/tests/scripts/github-action-helper.sh @@ -216,6 +216,7 @@ function deploy_cluster() { kubectl create -f rbdmirror.yaml kubectl create -f filesystem-mirror.yaml kubectl create -f nfs-test.yaml + kubectl create -f subvolumegroup.yaml deploy_manifest_with_local_build toolbox.yaml }
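
Reviewer note: below is a minimal sketch of how the new CRD can be driven from Go through
the generated typed clientset this patch adds, complementing deploy/examples/subvolumegroup.yaml.
The kubeconfig path, the "rook-ceph" namespace, the filesystem name "myfs", and the group
name "group-a" are illustrative assumptions, not values mandated by the patch:

    package main

    import (
        "context"
        "fmt"

        cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
        rookclient "github.com/rook/rook/pkg/client/clientset/versioned"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        // Hypothetical kubeconfig path; any standard client-go config source works.
        cfg, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config")
        if err != nil {
            panic(err)
        }
        clientset, err := rookclient.NewForConfig(cfg)
        if err != nil {
            panic(err)
        }

        // Ask the operator for a subvolume group "group-a" in the filesystem "myfs";
        // the subvolumegroup controller added by this patch then performs the actual
        // `ceph fs subvolumegroup create` against the cluster.
        svg := &cephv1.CephFilesystemSubVolumeGroup{
            ObjectMeta: metav1.ObjectMeta{Name: "group-a", Namespace: "rook-ceph"},
            Spec:       cephv1.CephFilesystemSubVolumeGroupSpec{FilesystemName: "myfs"},
        }
        created, err := clientset.CephV1().CephFilesystemSubVolumeGroups("rook-ceph").Create(context.TODO(), svg, metav1.CreateOptions{})
        if err != nil {
            panic(err)
        }
        fmt.Printf("created CephFilesystemSubVolumeGroup %s/%s\n", created.Namespace, created.Name)
    }

Deleting the CR reverses this: the controller's finalizer runs `ceph fs subvolumegroup rm`
and, as tested above, the deletion is blocked while the group still contains subvolumes or
while a CephFilesystemSubVolumeGroup still references its CephFilesystem.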