From cef60ed72ea561205af24fd066bad211017c9f5a Mon Sep 17 00:00:00 2001
From: Travis Nielsen
Date: Mon, 27 Sep 2021 15:50:11 -0600
Subject: [PATCH] csi: no longer install the volumereplication crds from rook

The volume replication CRDs are an external component, not owned by Rook.
Therefore, they should be installed like any other independent component,
in case the admin later installs other consumers of the volumereplication
CRDs in addition to Rook and the CSI driver.

Signed-off-by: Travis Nielsen
(cherry picked from commit c420f2309c3af71c55a8ef31bd0c6616b7ad38e3)

# Conflicts:
#	build/crds/build-crds.sh
#	build/crds/crds.go
#	cluster/charts/rook-ceph/templates/resources.yaml
#	cluster/examples/kubernetes/ceph/crds.yaml
#	go.mod
#	go.sum
#	tests/framework/installer/ceph_installer.go
#	tests/integration/ceph_object_test.go
#	tests/integration/ceph_smoke_test.go
---
 Documentation/ceph-csi-drivers.md             |  20 +-
 build/crds/build-crds.sh                      |   5 +-
 .../charts/rook-ceph/templates/resources.yaml |   3 +
 cluster/charts/rook-ceph/values.yaml          |   4 +-
 cluster/examples/kubernetes/ceph/crds.yaml    |   3 +
 .../kubernetes/ceph/operator-openshift.yaml   |   4 +-
 .../examples/kubernetes/ceph/operator.yaml    |   4 +-
 go.mod                                        |   3 +
 go.sum                                        |  31 +-
 images/ceph/Makefile                          |  15 +
 tests/framework/installer/ceph_installer.go   |  39 ++
 tests/framework/installer/settings.go         |   4 +
 tests/integration/ceph_object_test.go         | 373 ++++++++++++++++++
 tests/integration/ceph_smoke_test.go          |   5 +
 14 files changed, 477 insertions(+), 36 deletions(-)
 create mode 100644 tests/integration/ceph_object_test.go

diff --git a/Documentation/ceph-csi-drivers.md b/Documentation/ceph-csi-drivers.md
index 9c216d00e9175..8c312e44a6c81 100644
--- a/Documentation/ceph-csi-drivers.md
+++ b/Documentation/ceph-csi-drivers.md
@@ -77,8 +77,18 @@ PVC will be updated to new size.

 ## RBD Mirroring
 To support RBD Mirroring, the [Volume Replication Operator](https://github.com/csi-addons/volume-replication-operator/blob/main/README.md) will be started in the RBD provisioner pod.
-Volume Replication Operator is a kubernetes operator that provides common and reusable APIs for storage disaster recovery. It is based on [csi-addons/spec](https://github.com/csi-addons/spec) specification and can be used by any storage provider.
-It follows controller pattern and provides extended APIs for storage disaster recovery. The extended APIs are provided via Custom Resource Definition (CRD).
-To enable volume replication:
-- For Helm deployments see the [helm settings](helm-operator.md#configuration).
-- For non-Helm deployments set `CSI_ENABLE_VOLUME_REPLICATION: "true"` in the operator.yaml
+The Volume Replication Operator is a Kubernetes operator that provides common and reusable APIs for storage disaster recovery. It is based on the [csi-addons/spec](https://github.com/csi-addons/spec) specification and can be used by any storage provider.
+It follows the controller pattern and provides extended APIs for storage disaster recovery. The extended APIs are provided via Custom Resource Definitions (CRDs).
+
+### Enable volume replication
+
+1. Install the volume replication CRDs:
+
+```console
+kubectl create -f https://raw.githubusercontent.com/csi-addons/volume-replication-operator/v0.1.0/config/crd/bases/replication.storage.openshift.io_volumereplications.yaml
+kubectl create -f https://raw.githubusercontent.com/csi-addons/volume-replication-operator/v0.1.0/config/crd/bases/replication.storage.openshift.io_volumereplicationclasses.yaml
+```
+
+2. Enable the volume replication controller:
+   - For Helm deployments see the [csi.volumeReplication.enabled setting](helm-operator.md#configuration).
+   - For non-Helm deployments set `CSI_ENABLE_VOLUME_REPLICATION: "true"` in `operator.yaml`.
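+
+For illustration, here is a minimal sketch of the replication resources those CRDs define (everything below is a placeholder example, not something shipped with this patch — adjust names, secrets, pool, and the scheduling interval to your cluster):
+
+```yaml
+apiVersion: replication.storage.openshift.io/v1alpha1
+kind: VolumeReplicationClass
+metadata:
+  name: rbd-volumereplicationclass        # placeholder name
+spec:
+  provisioner: rook-ceph.rbd.csi.ceph.com # operator namespace + RBD driver name
+  parameters:
+    mirroringMode: snapshot
+    schedulingInterval: "12m"             # how often to mirror the image
+    replication.storage.openshift.io/replication-secret-name: rook-csi-rbd-provisioner
+    replication.storage.openshift.io/replication-secret-namespace: rook-ceph
+---
+apiVersion: replication.storage.openshift.io/v1alpha1
+kind: VolumeReplication
+metadata:
+  name: pvc-volumereplication             # placeholder name
+spec:
+  volumeReplicationClass: rbd-volumereplicationclass
+  replicationState: primary               # "secondary" on the peer cluster
+  dataSource:
+    kind: PersistentVolumeClaim
+    name: rbd-pvc                         # placeholder: the PVC to replicate
+```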
diff --git a/build/crds/build-crds.sh b/build/crds/build-crds.sh
index 7775cb1e0bf60..a50d3f615cac3 100755
--- a/build/crds/build-crds.sh
+++ b/build/crds/build-crds.sh
@@ -53,15 +53,11 @@ generating_crds_v1() {
 	$YQ_BIN_PATH w -i "${OLM_CATALOG_DIR}"/ceph.rook.io_cephclusters.yaml spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.storage.properties.storageClassDeviceSets.items.properties.volumeClaimTemplates.items.properties.metadata.properties.annotations.x-kubernetes-preserve-unknown-fields true
 }

 generating_crds_v1alpha2() {
 	"$CONTROLLER_GEN_BIN_PATH" "$CRD_OPTIONS" paths="./pkg/apis/rook.io/v1alpha2" output:crd:artifacts:config="$OLM_CATALOG_DIR"
 	# TODO: revisit later
 }

-generate_vol_rep_crds() {
-	"$CONTROLLER_GEN_BIN_PATH" "$CRD_OPTIONS" paths="github.com/csi-addons/volume-replication-operator/api/v1alpha1" output:crd:artifacts:config="$OLM_CATALOG_DIR"
-}
-
 generating_main_crd() {
 	true > "$CEPH_CRDS_FILE_PATH"
 	true > "$CEPH_HELM_CRDS_FILE_PATH"
@@ -111,8 +107,6 @@
 if [ -z "$NO_OB_OBC_VOL_GEN" ]; then
 	generating_crds_v1alpha2
 fi

-generate_vol_rep_crds
-
 generating_main_crd

 for crd in "$OLM_CATALOG_DIR/"*.yaml; do
diff --git a/cluster/charts/rook-ceph/templates/resources.yaml b/cluster/charts/rook-ceph/templates/resources.yaml
index f61290f5fc27c..d05a7fdc45d54 100644
--- a/cluster/charts/rook-ceph/templates/resources.yaml
+++ b/cluster/charts/rook-ceph/templates/resources.yaml
@@ -8945,9 +8945,6 @@ spec:
         x-kubernetes-preserve-unknown-fields: true
     subresources:
       status: {}
----
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
@@ -9784,9 +8948,3 @@ spec:
-              type: boolean
-    subresources:
-      status: {}
 ---
 apiVersion: apiextensions.k8s.io/v1beta1
diff --git a/cluster/charts/rook-ceph/values.yaml b/cluster/charts/rook-ceph/values.yaml
index db2eed5925393..956930d2e049e 100644
--- a/cluster/charts/rook-ceph/values.yaml
+++ b/cluster/charts/rook-ceph/values.yaml
@@ -288,7 +288,9 @@ csi:
   #cephfsPodLabels: "key1=value1,key2=value2"
   # Labels to add to the CSI RBD Deployments and DaemonSets Pods.
   #rbdPodLabels: "key1=value1,key2=value2"
-  # Enable volume replication controller
+  # Enable the volume replication controller.
+  # Before enabling, ensure the Volume Replication CRDs are created.
+  # See https://rook.io/docs/rook/latest/ceph-csi-drivers.html#rbd-mirroring
   volumeReplication:
     enabled: false
     #image: "quay.io/csiaddons/volumereplication-operator:v0.1.0"
diff --git a/cluster/examples/kubernetes/ceph/crds.yaml b/cluster/examples/kubernetes/ceph/crds.yaml
index 37adc598e7edc..85a643cb297a7 100644
--- a/cluster/examples/kubernetes/ceph/crds.yaml
+++ b/cluster/examples/kubernetes/ceph/crds.yaml
@@ -8932,9 +8932,6 @@ spec:
         x-kubernetes-preserve-unknown-fields: true
     subresources:
       status: {}
----
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
@@ -9219,3 +8935,0 @@ status:
-    plural: ""
-  conditions: []
-  storedVersions: []
diff --git a/cluster/examples/kubernetes/ceph/operator-openshift.yaml b/cluster/examples/kubernetes/ceph/operator-openshift.yaml
index 13a97bf856a18..d2a847291eb3b 100644
--- a/cluster/examples/kubernetes/ceph/operator-openshift.yaml
+++ b/cluster/examples/kubernetes/ceph/operator-openshift.yaml
@@ -403,7 +403,9 @@ data:
   # Whether to start the discovery daemon to watch for raw storage devices on nodes in the cluster.
   # This daemon does not need to run if you are only going to create your OSDs based on StorageClassDeviceSets with PVCs.
   ROOK_ENABLE_DISCOVERY_DAEMON: "false"
-  # Enable volume replication controller
+  # Enable the volume replication controller.
+  # Before enabling, ensure the Volume Replication CRDs are created.
+  # See https://rook.io/docs/rook/latest/ceph-csi-drivers.html#rbd-mirroring
   CSI_ENABLE_VOLUME_REPLICATION: "false"
   # The timeout value (in seconds) of Ceph commands. It should be >= 1. If this variable is not set or is an invalid value, it's default to 15.
   ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS: "15"
diff --git a/cluster/examples/kubernetes/ceph/operator.yaml b/cluster/examples/kubernetes/ceph/operator.yaml
index 8be01066fbd3e..b51a4006f8b33 100644
--- a/cluster/examples/kubernetes/ceph/operator.yaml
+++ b/cluster/examples/kubernetes/ceph/operator.yaml
@@ -329,7 +329,9 @@ data:
   ROOK_ENABLE_DISCOVERY_DAEMON: "false"
   # The timeout value (in seconds) of Ceph commands. It should be >= 1. If this variable is not set or is an invalid value, it's default to 15.
   ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS: "15"
-  # Enable volume replication controller
+  # Enable the volume replication controller.
+  # Before enabling, ensure the Volume Replication CRDs are created.
+  # See https://rook.io/docs/rook/latest/ceph-csi-drivers.html#rbd-mirroring
   CSI_ENABLE_VOLUME_REPLICATION: "false"
   # CSI_VOLUME_REPLICATION_IMAGE: "quay.io/csiaddons/volumereplication-operator:v0.1.0"
diff --git a/go.mod b/go.mod
index f9fa78158f643..846658fd9c007 100644
--- a/go.mod
+++ b/go.mod
@@ -7,8 +7,7 @@ require (
 	github.com/banzaicloud/k8s-objectmatcher v1.1.0
 	github.com/ceph/go-ceph v0.11.0
 	github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f
-	github.com/csi-addons/volume-replication-operator v0.1.1-0.20210525040814-ab575a2879fb
 	github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32
 	github.com/go-ini/ini v1.51.1
 	github.com/google/go-cmp v0.5.5
 	github.com/google/uuid v1.1.2
diff --git a/go.sum b/go.sum
index bc2d720328489..ab426a02003e8 100644
--- a/go.sum
+++ b/go.sum
@@ -245,7 +245,6 @@ github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENU
 github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
 github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
 github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
 github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
 github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
@@ -290,8 +289,6 @@ github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
 github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
 github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
-github.com/container-storage-interface/spec v1.2.0 h1:bD9KIVgaVKKkQ/UbVUY9kCaH/CJbhNxe0eeB4JeJV2s=
-github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4=
 github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
 github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
 github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
@@ -345,10 +342,6 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsr
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/csi-addons/spec v0.1.0 h1:y3TOd7qtnwBQPikGa1VvaL7ObyddAZehYW8DNGBlOyc=
-github.com/csi-addons/spec v0.1.0/go.mod h1:Mwq4iLiUV4s+K1bszcWU6aMsR5KPsbIYzzszJ6+56vI=
-github.com/csi-addons/volume-replication-operator v0.1.1-0.20210525040814-ab575a2879fb h1:SAD+o8nvVErQkOIa31u1BblVHAXXEPQl7mRc+U5GBp8=
-github.com/csi-addons/volume-replication-operator v0.1.1-0.20210525040814-ab575a2879fb/go.mod h1:cQvrR2fRQ7Z9jbbt3+PGZzFmByNfAH3KW8OuH3bkMbY=
 github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -476,11 +469,9 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG
 github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
 github.com/go-logr/logr v0.2.1/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
 github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc=
 github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
 github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
-github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU=
 github.com/go-logr/zapr v0.4.0 h1:uc1uML3hRYL9/ZZPdgHS/n8Nzo+eaYL/Efxkkamf7OM=
 github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
 github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
@@ -972,7 +963,6 @@ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
 github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
 github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
 github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
 github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
 github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
 github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
@@ -1068,10 +1058,8 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/kube-object-storage/lib-bucket-provisioner v0.0.0-20210818162813-3eee31c01875 h1:jX3VXgmNOye8XYKjwcTVXcBYcPv3jj657fwX8DN/HiM=
 github.com/kube-object-storage/lib-bucket-provisioner v0.0.0-20210818162813-3eee31c01875/go.mod h1:XpQ9HGG9uF5aJCBP+s6w5kSiyTIVSqCV8+XAE4qms5E=
-github.com/kubernetes-csi/csi-lib-utils v0.9.1 h1:sGq6ifVujfMSkfTsMZip44Ttv8SDXvsBlFk9GdYl/b8=
-github.com/kubernetes-csi/csi-lib-utils v0.9.1/go.mod h1:8E2jVUX9j3QgspwHXa6LwyN7IHQDjW9jX3kwoWnSC+M=
 github.com/kubernetes-csi/external-snapshotter/client/v4 v4.0.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys=
 github.com/lestrrat-go/jwx v0.9.0/go.mod h1:iEoxlYfZjvoGpuWwxUz+eR5e6KTJGsaRcy/YNA/UnBk=
 github.com/lib/pq v0.0.0-20180327071824-d34b9ff171c2/go.mod
h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= @@ -1224,7 +1219,6 @@ github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= @@ -1237,7 +1231,6 @@ github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak= github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= @@ -1449,7 +1442,6 @@ github.com/spf13/cobra v0.0.0-20180319062004-c439c4fa0937/go.mod h1:1l0Ry5zgKvJa github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -1535,7 +1527,6 @@ go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd v0.5.0-alpha.5.0.20200425165423-262c93980547/go.mod h1:YoUyTScD3Vcv2RBm3eGVOq7i1ULiz3OuXoQFWOirmAM= -go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.mongodb.org/atlas v0.7.1 h1:hNBtwtKgmhB9vmSX/JyN/cArmhzyy4ihKpmXSMIc4mw= go.mongodb.org/atlas v0.7.1/go.mod h1:CIaBeO8GLHhtYLw7xSSXsw7N90Z4MFY87Oy9qcPyuEs= @@ -1578,12 +1569,10 @@ go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.9.1/go.mod 
h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= -go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1806,7 +1795,6 @@ golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200501145240-bc7a7d42d5c3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1935,7 +1923,6 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= -gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= @@ -2122,7 +2109,6 @@ k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8/go.mod h1:Ixke k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY= k8s.io/apiextensions-apiserver v0.15.7/go.mod h1:ctb/NYtsiBt6CGN42Z+JrOkxi9nJYaKZYmatJ6SUy0Y= k8s.io/apiextensions-apiserver v0.18.3/go.mod h1:TMsNGs7DYpMXd+8MOCX8KzPOCx8fnZMoIGB24m03+JE= -k8s.io/apiextensions-apiserver v0.19.2/go.mod h1:EYNjpqIAvNZe+svXVx9j4uBaVhTB4C94HkY3w058qcg= k8s.io/apiextensions-apiserver v0.20.1/go.mod h1:ntnrZV+6a3dB504qwC5PN/Yg9PBiDNt1EVqbW2kORVk= k8s.io/apiextensions-apiserver v0.21.1 h1:AA+cnsb6w7SZ1vD32Z+zdgfXdXY8X9uGX5bN6EoPEIo= k8s.io/apiextensions-apiserver v0.21.1/go.mod h1:KESQFCGjqVcVsZ9g0xX5bacMjyX5emuWcS2arzdEouA= @@ -2145,7 +2131,6 @@ k8s.io/apimachinery v0.21.2/go.mod h1:CdTY8fU/BlvAbJ2z/8kBwimGki5Zp8/fbVuLY8gJum k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg= k8s.io/apiserver v0.15.7/go.mod h1:d5Dbyt588GbBtUnbx9fSK+pYeqgZa32op+I1BmXiNuE= k8s.io/apiserver v0.18.3/go.mod h1:tHQRmthRPLUtwqsOnJJMoI8SW3lnoReZeE861lH8vUw= -k8s.io/apiserver v0.19.2/go.mod h1:FreAq0bJ2vtZFj9Ago/X0oNGC51GfubKK/ViOKfVAOA= k8s.io/apiserver v0.20.1/go.mod 
h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.21.1/go.mod h1:nLLYZvMWn35glJ4/FZRhzLG/3MPxAaZTgV4FJZdr+tY= k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90/go.mod h1:J69/JveO6XESwVgG53q3Uz5OSfgsv4uxpScmmyYOOlk= @@ -2167,15 +2152,12 @@ k8s.io/code-generator v0.0.0-20190912054826-cd179ad6a269/go.mod h1:V5BD6M4CyaN5m k8s.io/code-generator v0.15.7/go.mod h1:G8bQwmHm2eafm5bgtX67XDZQ8CWKSGu9DekI+yN4Y5I= k8s.io/code-generator v0.18.3/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c= k8s.io/code-generator v0.19.0/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= -k8s.io/code-generator v0.19.2/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= k8s.io/code-generator v0.20.0/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg= k8s.io/code-generator v0.20.1/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg= k8s.io/code-generator v0.21.1/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= k8s.io/component-base v0.0.0-20190918160511-547f6c5d7090/go.mod h1:933PBGtQFJky3TEwYx4aEPZ4IxqhWh3R6DCmzqIn1hA= k8s.io/component-base v0.15.7/go.mod h1:iunfIII6uq3NC3S/EhBpKv8+eQ76vwlOYdFpyIeBk7g= k8s.io/component-base v0.18.3/go.mod h1:bp5GzGR0aGkYEfTj+eTY0AN/vXTgkJdQXjNTTVUaa3k= -k8s.io/component-base v0.19.0/go.mod h1:dKsY8BxkA+9dZIAh2aWJLL/UdASFDNtGYTCItL4LM7Y= -k8s.io/component-base v0.19.2/go.mod h1:g5LrsiTiabMLZ40AR6Hl45f088DevyGY+cCE2agEIVo= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.21.1 h1:iLpj2btXbR326s/xNQWmPNGu0gaYSjzn7IN/5i28nQw= k8s.io/component-base v0.21.1/go.mod h1:NgzFZ2qu4m1juby4TnrmpR8adRk6ka62YdH5DkIIyKA= @@ -2221,7 +2203,6 @@ k8s.io/utils v0.0.0-20190809000727-6c36bc71fc4a/go.mod h1:sZAwmy6armz5eXlNoLmJcl k8s.io/utils v0.0.0-20190923111123-69764acb6e8e/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20200912215256-4140de9c8800/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210527160623-6fdb442a123b h1:MSqsVQ3pZvPGTqCjptfimO2WjG7A9un2zcpiHkA6M/s= k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= @@ -2237,12 +2218,10 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/controller-runtime v0.2.0-beta.2/go.mod h1:TSH2R0nSz4WAlUUlNnOFcOR/VUhfwBLlmtq2X6AiQCA= sigs.k8s.io/controller-runtime v0.2.2/go.mod h1:9dyohw3ZtoXQuV1e766PHUn+cmrRCIcBh6XIMFNMZ+I= -sigs.k8s.io/controller-runtime v0.7.0/go.mod h1:pJ3YBrJiAqMAZKi6UVGuE98ZrroV1p+pIhoHsMm9wdU= sigs.k8s.io/controller-runtime v0.9.0 
h1:ZIZ/dtpboPSbZYY7uUz2OzrkaBTOThx2yekLtpGB+zY=
 sigs.k8s.io/controller-runtime v0.9.0/go.mod h1:TgkfvrhhEw3PlI0BRL/5xM+89y3/yc0ZDfdbTl84si8=
 sigs.k8s.io/controller-tools v0.2.2-0.20190919191502-76a25b63325a/go.mod h1:8SNGuj163x/sMwydREj7ld5mIMJu1cDanIfnx6xsU70=
diff --git a/images/ceph/Makefile b/images/ceph/Makefile
index e0d4278626f2b..467776866ebe7 100755
--- a/images/ceph/Makefile
+++ b/images/ceph/Makefile
@@ -47,6 +47,11 @@ $(info NOT INCLUDING OLM/CSV TEMPLATES!)
 $(info )
 endif

+VOL_REPL_VERSION = v0.1.0
+VOL_REPL_URL = https://raw.githubusercontent.com/csi-addons/volume-replication-operator/$(VOL_REPL_VERSION)/config/crd/bases
+VOLUME_REPLICATION_CRD = replication.storage.openshift.io_volumereplications.yaml
+VOLUME_REPLICATION_CLASS_CRD = replication.storage.openshift.io_volumereplicationclasses.yaml
+
 OPERATOR_SDK := $(TOOLS_HOST_DIR)/operator-sdk-$(OPERATOR_SDK_VERSION)
 YQ := $(TOOLS_HOST_DIR)/yq-$(YQ_VERSION)
 export OPERATOR_SDK YQ
@@ -70,6 +75,7 @@ do.build:
 ifeq ($(INCLUDE_CSV_TEMPLATES),true)
 	@$(MAKE) CSV_TEMPLATE_DIR=$(TEMP) generate-csv-templates
+	@$(MAKE) CRD_TEMPLATE_DIR=$(TEMP)/cluster/olm/ceph/templates/crds/ get-volume-replication-crds
 	@cp -r $(TEMP)/cluster/olm/ceph/templates $(TEMP)/ceph-csv-templates
 else
 	mkdir $(TEMP)/ceph-csv-templates
@@ -106,6 +112,15 @@ generate-csv-templates: $(OPERATOR_SDK) $(YQ) ## Generate CSV templates for OLM
 	@OLM_CATALOG_DIR=$(CSV_TEMPLATE_DIR)/cluster/olm/ceph ../../cluster/olm/ceph/generate-rook-csv-templates.sh
 	@echo " === Generated CSV templates can be found at $(CSV_TEMPLATE_DIR)/cluster/olm/ceph/templates"

+get-volume-replication-crds:
+	@if [[ -z "$(CRD_TEMPLATE_DIR)" ]]; then echo "CRD_TEMPLATE_DIR is not set"; exit 1; fi
+	@if [[ ! -d "$(CACHE_DIR)/crds" ]]; then\
+		mkdir -p $(CACHE_DIR)/crds;\
+		curl -L $(VOL_REPL_URL)/$(VOLUME_REPLICATION_CRD) -o $(CACHE_DIR)/crds/$(VOLUME_REPLICATION_CRD);\
+		curl -L $(VOL_REPL_URL)/$(VOLUME_REPLICATION_CLASS_CRD) -o $(CACHE_DIR)/crds/$(VOLUME_REPLICATION_CLASS_CRD);\
+	fi
+	@cp $(CACHE_DIR)/crds/* $(CRD_TEMPLATE_DIR)
+
 $(YQ):
 	@echo === installing yq $(GOHOST)
 	@mkdir -p $(TOOLS_HOST_DIR)
diff --git a/tests/framework/installer/ceph_installer.go b/tests/framework/installer/ceph_installer.go
index 7e4daea7e362f..00944ddf1ca8d 100644
--- a/tests/framework/installer/ceph_installer.go
+++ b/tests/framework/installer/ceph_installer.go
@@ -61,14 +61,19 @@ const (
 	osd_pool_default_size = 1
 	bdev_flock_retry = 20
 	`
+	volumeReplicationVersion = "v0.1.0"
 )

 var (
 	NautilusVersion          = cephv1.CephVersionSpec{Image: nautilusTestImage}
 	NautilusPartitionVersion = cephv1.CephVersionSpec{Image: nautilusTestImagePartition}
 	OctopusVersion           = cephv1.CephVersionSpec{Image: octopusTestImage}
 	PacificVersion           = cephv1.CephVersionSpec{Image: pacificTestImage}
 	MasterVersion            = cephv1.CephVersionSpec{Image: masterTestImage, AllowUnsupported: true}
+
+	volumeReplicationBaseURL     = fmt.Sprintf("https://raw.githubusercontent.com/csi-addons/volume-replication-operator/%s/config/crd/bases/", volumeReplicationVersion)
+	volumeReplicationCRDURL      = volumeReplicationBaseURL + "replication.storage.openshift.io_volumereplications.yaml"
+	volumeReplicationClassCRDURL = volumeReplicationBaseURL + "replication.storage.openshift.io_volumereplicationclasses.yaml"
 )

 // CephInstaller wraps installing and uninstalling rook on a platform
@@ -114,6 +128,10 @@ func (h *CephInstaller) CreateCephOperator() (err error) {
 		return errors.Errorf("Failed to start admission controllers: %v", err)
 	}

+	if err := h.CreateVolumeReplicationCRDs(); err != nil {
+		return errors.Wrap(err, "failed to create volume replication CRDs")
+	}
+
 	_, err = h.k8shelper.KubectlWithStdin(h.Manifests.GetOperator(), createFromStdinArgs...)
 	if err != nil {
 		return errors.Errorf("Failed to create rook-operator pod: %v", err)
@@ -123,6 +141,27 @@
 	return nil
 }

+func (h *CephInstaller) CreateVolumeReplicationCRDs() (err error) {
+	if !h.Manifests.Settings().EnableVolumeReplication {
+		logger.Info("volume replication CRDs skipped")
+		return nil
+	}
+	if !h.k8shelper.VersionAtLeast("v1.16.0") {
+		logger.Info("volume replication CRDs skipped on k8s older than v1.16")
+		return nil
+	}
+
+	logger.Info("Creating volume replication CRDs")
+	if _, err := h.k8shelper.KubectlWithStdin(readManifestFromURL(volumeReplicationCRDURL), createFromStdinArgs...); err != nil {
+		return errors.Wrap(err, "failed to create volumereplication CRD")
+	}
+
+	if _, err := h.k8shelper.KubectlWithStdin(readManifestFromURL(volumeReplicationClassCRDURL), createFromStdinArgs...); err != nil {
+		return errors.Wrap(err, "failed to create volumereplicationclass CRD")
+	}
+	return nil
+}
+
 func (h *CephInstaller) startAdmissionController() error {
 	if !h.k8shelper.VersionAtLeast("v1.16.0") {
 		logger.Info("skipping the admission controller on K8s version older than v1.16")
diff --git a/tests/framework/installer/settings.go b/tests/framework/installer/settings.go
index 7bb7a4c2339f2..4b268b2fbf09e 100644
--- a/tests/framework/installer/settings.go
+++ b/tests/framework/installer/settings.go
@@ -46,6 +46,10 @@ func readManifest(provider, filename string) string {

 func readManifestFromGithub(rookVersion, provider, filename string) string {
 	url := fmt.Sprintf("https://raw.githubusercontent.com/rook/rook/%s/cluster/examples/kubernetes/%s/%s", rookVersion, provider, filename)
+	return readManifestFromURL(url)
+}
+
+func readManifestFromURL(url string) string {
 	logger.Infof("Retrieving manifest: %s", url)
 	var response *http.Response
 	var err error
diff --git a/tests/integration/ceph_object_test.go b/tests/integration/ceph_object_test.go
new file mode 100644
index 0000000000000..358d8045af566
--- /dev/null
+++ b/tests/integration/ceph_object_test.go
@@ -0,0 +1,373 @@
+/*
+Copyright 2021 The Rook Authors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package integration
+
+import (
+	"context"
+	"encoding/json"
+	"testing"
+	"time"
+
+	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
+	"github.com/rook/rook/pkg/daemon/ceph/client"
+	rgw "github.com/rook/rook/pkg/operator/ceph/object"
+	"github.com/rook/rook/tests/framework/clients"
+	"github.com/rook/rook/tests/framework/installer"
+	"github.com/rook/rook/tests/framework/utils"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/suite"
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+	objectStoreServicePrefixUniq = "rook-ceph-rgw-"
+	objectStoreTLSName           = "tls-test-store"
+)
+
+var (
+	objectStoreServicePrefix = "rook-ceph-rgw-"
+)
+
+func TestCephObjectSuite(t *testing.T) {
+	if installer.SkipTestSuite(installer.CephTestSuite) {
+		t.Skip()
+	}
+
+	s := new(ObjectSuite)
+	defer func(s *ObjectSuite) {
+		HandlePanics(recover(), s.TearDownSuite, s.T)
+	}(s)
+	suite.Run(t, s)
+}
+
+type ObjectSuite struct {
+	suite.Suite
+	helper    *clients.TestClient
+	settings  *installer.TestCephSettings
+	installer *installer.CephInstaller
+	k8sh      *utils.K8sHelper
+}
+
+func (s *ObjectSuite) SetupSuite() {
+	namespace := "object-ns"
+	s.settings = &installer.TestCephSettings{
+		ClusterName:               "object-cluster",
+		Namespace:                 namespace,
+		OperatorNamespace:         installer.SystemNamespace(namespace),
+		StorageClassName:          installer.StorageClassName(),
+		UseHelm:                   false,
+		UsePVC:                    installer.UsePVC(),
+		Mons:                      3,
+		SkipOSDCreation:           false,
+		EnableAdmissionController: true,
+		UseCrashPruner:            true,
+		EnableVolumeReplication:   false,
+		RookVersion:               installer.LocalBuildTag,
+		CephVersion:               installer.ReturnCephVersion(),
+	}
+	s.settings.ApplyEnvVars()
+	s.installer, s.k8sh = StartTestCluster(s.T, s.settings)
+	s.helper = clients.CreateTestClient(s.k8sh, s.installer.Manifests)
+}
+
+func (s *ObjectSuite) AfterTest(suiteName, testName string) {
+	s.installer.CollectOperatorLog(suiteName, testName)
+}
+
+func (s *ObjectSuite) TearDownSuite() {
+	s.installer.UninstallRook()
+}
+
+func (s *ObjectSuite) TestWithTLS() {
+	if utils.IsPlatformOpenShift() {
+		s.T().Skip("object store tests skipped on openshift")
+	}
+
+	tls := true
+	objectStoreServicePrefix = objectStoreServicePrefixUniq
+	runObjectE2ETest(s.helper, s.k8sh, s.Suite, s.settings.Namespace, tls)
+	err := s.k8sh.Clientset.CoreV1().Secrets(s.settings.Namespace).Delete(context.TODO(), objectTLSSecretName, metav1.DeleteOptions{})
+	if err != nil {
+		if !errors.IsNotFound(err) {
+			logger.Fatal("failed to delete store TLS secret")
+		}
+	}
+	logger.Info("successfully deleted store TLS secret")
+}
+
+func (s *ObjectSuite) TestWithoutTLS() {
+	if utils.IsPlatformOpenShift() {
+		s.T().Skip("object store tests skipped on openshift")
+	}
+
+	tls := false
+	objectStoreServicePrefix = objectStoreServicePrefixUniq
+	runObjectE2ETest(s.helper, s.k8sh, s.Suite, s.settings.Namespace, tls)
+}
+
+// Smoke test for ObjectStore - the test checks the following operations on an ObjectStore, in order:
+// create an object store, create a user, connect to the object store, create a bucket,
+// read/write/delete objects in the bucket, check for issues in the MGRs, delete the bucket, and delete the user.
+// The test runs for object stores both with and without TLS enabled.
+func runObjectE2ETest(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, namespace string, tlsEnable bool) {
+	storeName := "test-store"
+	if tlsEnable {
+		storeName =
objectStoreTLSName + } + + logger.Infof("Running on Rook Cluster %s", namespace) + createCephObjectStore(s.T(), helper, k8sh, namespace, storeName, 3, tlsEnable) + + // test that a second object store can be created (and deleted) while the first exists + s.T().Run("run a second object store", func(t *testing.T) { + otherStoreName := "other-" + storeName + // The lite e2e test is perfect, as it only creates a cluster, checks that it is healthy, + // and then deletes it. + deleteStore := true + runObjectE2ETestLite(t, helper, k8sh, namespace, otherStoreName, 1, deleteStore, tlsEnable) + }) + if tlsEnable { + // test that a third object store can be created (and deleted) while the first exists + s.T().Run("run a third object store with broken tls", func(t *testing.T) { + otherStoreName := "broken-" + storeName + // The lite e2e test is perfect, as it only creates a cluster, checks that it is healthy, + // and then deletes it. + deleteStore := true + objectStoreServicePrefix = objectStoreServicePrefixUniq + runObjectE2ETestLite(t, helper, k8sh, namespace, otherStoreName, 1, deleteStore, tlsEnable) + objectStoreServicePrefix = objectStoreServicePrefixUniq + }) + } + + // now test operation of the first object store + testObjectStoreOperations(s, helper, k8sh, namespace, storeName) +} + +func testObjectStoreOperations(s suite.Suite, helper *clients.TestClient, k8sh *utils.K8sHelper, namespace, storeName string) { + ctx := context.TODO() + clusterInfo := client.AdminClusterInfo(namespace) + t := s.T() + + t.Run("create CephObjectStoreUser", func(t *testing.T) { + createCephObjectUser(s, helper, k8sh, namespace, storeName, userid, true, true) + i := 0 + for i = 0; i < 4; i++ { + if helper.ObjectUserClient.UserSecretExists(namespace, storeName, userid) { + break + } + logger.Info("waiting 5 more seconds for user secret to exist") + time.Sleep(5 * time.Second) + } + assert.NotEqual(t, 4, i) + }) + + context := k8sh.MakeContext() + objectStore, err := k8sh.RookClientset.CephV1().CephObjectStores(namespace).Get(ctx, storeName, metav1.GetOptions{}) + assert.Nil(t, err) + rgwcontext, err := rgw.NewMultisiteContext(context, clusterInfo, objectStore) + assert.Nil(t, err) + t.Run("create ObjectBucketClaim", func(t *testing.T) { + logger.Infof("create OBC %q with storageclass %q - using reclaim policy 'delete' so buckets don't block deletion", obcName, bucketStorageClassName) + cobErr := helper.BucketClient.CreateBucketStorageClass(namespace, storeName, bucketStorageClassName, "Delete", region) + assert.Nil(t, cobErr) + cobcErr := helper.BucketClient.CreateObc(obcName, bucketStorageClassName, bucketname, maxObject, true) + assert.Nil(t, cobcErr) + + created := utils.Retry(12, 2*time.Second, "OBC is created", func() bool { + return helper.BucketClient.CheckOBC(obcName, "bound") + }) + assert.True(t, created) + logger.Info("OBC created successfully") + + var bkt rgw.ObjectBucket + i := 0 + for i = 0; i < 4; i++ { + b, code, err := rgw.GetBucket(rgwcontext, bucketname) + if b != nil && err == nil { + bkt = *b + break + } + logger.Warningf("cannot get bucket %q, retrying... bucket: %v. 
code: %d, err: %v", bucketname, b, code, err) + logger.Infof("(%d) check bucket exists, sleeping for 5 seconds ...", i) + time.Sleep(5 * time.Second) + } + assert.NotEqual(t, 4, i) + assert.Equal(t, bucketname, bkt.Name) + logger.Info("OBC, Secret and ConfigMap created") + }) + + t.Run("S3 access to OBC bucket", func(t *testing.T) { + var s3client *rgw.S3Agent + s3endpoint, _ := helper.ObjectClient.GetEndPointUrl(namespace, storeName) + s3AccessKey, _ := helper.BucketClient.GetAccessKey(obcName) + s3SecretKey, _ := helper.BucketClient.GetSecretKey(obcName) + if objectStore.Spec.IsTLSEnabled() { + s3client, err = rgw.NewInsecureS3Agent(s3AccessKey, s3SecretKey, s3endpoint, region, true) + } else { + s3client, err = rgw.NewS3Agent(s3AccessKey, s3SecretKey, s3endpoint, region, true, nil) + } + + assert.Nil(t, err) + logger.Infof("endpoint (%s) Accesskey (%s) secret (%s)", s3endpoint, s3AccessKey, s3SecretKey) + + t.Run("put object", func(t *testing.T) { + _, poErr := s3client.PutObjectInBucket(bucketname, ObjBody, ObjectKey1, contentType) + assert.Nil(t, poErr) + }) + + t.Run("get object", func(t *testing.T) { + read, err := s3client.GetObjectInBucket(bucketname, ObjectKey1) + assert.Nil(t, err) + assert.Equal(t, ObjBody, read) + }) + + t.Run("quota enforcement", func(t *testing.T) { + _, poErr := s3client.PutObjectInBucket(bucketname, ObjBody, ObjectKey2, contentType) + assert.Nil(t, poErr) + logger.Infof("Testing the max object limit") + _, poErr = s3client.PutObjectInBucket(bucketname, ObjBody, ObjectKey3, contentType) + assert.Error(t, poErr) + }) + + t.Run("update quota limits", func(t *testing.T) { + poErr := helper.BucketClient.UpdateObc(obcName, bucketStorageClassName, bucketname, newMaxObject, true) + assert.Nil(t, poErr) + updated := utils.Retry(5, 2*time.Second, "OBC is updated", func() bool { + return helper.BucketClient.CheckOBMaxObject(obcName, newMaxObject) + }) + assert.True(t, updated) + logger.Infof("Testing the updated object limit") + _, poErr = s3client.PutObjectInBucket(bucketname, ObjBody, ObjectKey3, contentType) + assert.NoError(t, poErr) + _, poErr = s3client.PutObjectInBucket(bucketname, ObjBody, ObjectKey4, contentType) + assert.Error(t, poErr) + }) + + t.Run("delete objects", func(t *testing.T) { + _, delobjErr := s3client.DeleteObjectInBucket(bucketname, ObjectKey1) + assert.Nil(t, delobjErr) + _, delobjErr = s3client.DeleteObjectInBucket(bucketname, ObjectKey2) + assert.Nil(t, delobjErr) + _, delobjErr = s3client.DeleteObjectInBucket(bucketname, ObjectKey3) + assert.Nil(t, delobjErr) + logger.Info("Objects deleted on bucket successfully") + }) + }) + + t.Run("Regression check: OBC does not revert to Pending phase", func(t *testing.T) { + // A bug exists in older versions of lib-bucket-provisioner that will revert a bucket and claim + // back to "Pending" phase after being created and initially "Bound" by looping infinitely in + // the bucket provision/creation loop. Verify that the OBC is "Bound" and stays that way. + // The OBC reconcile loop runs again immediately b/c the OBC is modified to refer to its OB. + // Wait a short amount of time before checking just to be safe. 
+ time.Sleep(15 * time.Second) + assert.True(t, helper.BucketClient.CheckOBC(obcName, "bound")) + }) + + t.Run("delete CephObjectStore should be blocked by OBC bucket and CephObjectStoreUser", func(t *testing.T) { + deleteObjectStore(t, k8sh, namespace, storeName) + + store := &cephv1.CephObjectStore{} + i := 0 + for i = 0; i < 4; i++ { + storeStr, err := k8sh.GetResource("-n", namespace, "CephObjectStore", storeName, "-o", "json") + assert.NoError(t, err) + + err = json.Unmarshal([]byte(storeStr), &store) + assert.NoError(t, err) + + cond := cephv1.FindStatusCondition(store.Status.Conditions, cephv1.ConditionDeletionIsBlocked) + if cond != nil { + break + } + logger.Info("waiting 2 more seconds for CephObjectStore to reach Deleting state") + time.Sleep(2 * time.Second) + } + assert.NotEqual(t, 4, i) + + assert.Equal(t, cephv1.ConditionDeleting, store.Status.Phase) // phase == "Deleting" + // verify deletion is blocked b/c object has dependents + cond := cephv1.FindStatusCondition(store.Status.Conditions, cephv1.ConditionDeletionIsBlocked) + logger.Infof("condition: %+v", cond) + assert.Equal(t, v1.ConditionTrue, cond.Status) + assert.Equal(t, cephv1.ObjectHasDependentsReason, cond.Reason) + // the CephObjectStoreUser and the bucket should both block deletion + assert.Contains(t, cond.Message, "CephObjectStoreUsers") + assert.Contains(t, cond.Message, userid) + assert.Contains(t, cond.Message, "buckets") + assert.Contains(t, cond.Message, bucketname) + + // The event is created by the same method that adds that condition, so we can be pretty + // sure it exists here. No need to do extra work to validate the event. + }) + + t.Run("delete OBC", func(t *testing.T) { + i := 0 + dobcErr := helper.BucketClient.DeleteObc(obcName, bucketStorageClassName, bucketname, maxObject, true) + assert.Nil(t, dobcErr) + logger.Info("Checking to see if the obc, secret, and cm have all been deleted") + for i = 0; i < 4 && !helper.BucketClient.CheckOBC(obcName, "deleted"); i++ { + logger.Infof("(%d) obc deleted check, sleeping for 5 seconds ...", i) + time.Sleep(5 * time.Second) + } + assert.NotEqual(t, 4, i) + + logger.Info("ensure OBC bucket was deleted") + var rgwErr int + for i = 0; i < 4; i++ { + _, rgwErr, _ = rgw.GetBucket(rgwcontext, bucketname) + if rgwErr == rgw.RGWErrorNotFound { + break + } + logger.Infof("(%d) check bucket deleted, sleeping for 5 seconds ...", i) + time.Sleep(5 * time.Second) + } + assert.NotEqual(t, 4, i) + assert.Equal(t, rgwErr, rgw.RGWErrorNotFound) + + dobErr := helper.BucketClient.DeleteBucketStorageClass(namespace, storeName, bucketStorageClassName, "Delete", region) + assert.Nil(t, dobErr) + }) + + t.Run("delete CephObjectStoreUser", func(t *testing.T) { + dosuErr := helper.ObjectUserClient.Delete(namespace, userid) + assert.Nil(t, dosuErr) + logger.Info("Object store user deleted successfully") + logger.Info("Checking to see if the user secret has been deleted") + i := 0 + for i = 0; i < 4 && helper.ObjectUserClient.UserSecretExists(namespace, storeName, userid) == true; i++ { + logger.Infof("(%d) secret check sleeping for 5 seconds ...", i) + time.Sleep(5 * time.Second) + } + assert.False(t, helper.ObjectUserClient.UserSecretExists(namespace, storeName, userid)) + }) + + t.Run("Regression check: mgrs are not in a crashloop", func(t *testing.T) { + assert.True(t, k8sh.CheckPodCountAndState("rook-ceph-mgr", namespace, 1, "Running")) + }) + + t.Run("CephObjectStore should delete now that dependents are gone", func(t *testing.T) { + // wait initially since it will almost 
never detect on the first try without this.
+		time.Sleep(3 * time.Second)
+
+		assertObjectStoreDeletion(t, k8sh, namespace, storeName)
+	})
+
+	// TODO : Add case for brownfield/cleanup s3 client
+}
diff --git a/tests/integration/ceph_smoke_test.go b/tests/integration/ceph_smoke_test.go
index 0bece79fca037..9b5816d3ce8bb 100644
--- a/tests/integration/ceph_smoke_test.go
+++ b/tests/integration/ceph_smoke_test.go
@@ -99,14 +99,12 @@ func (s *SmokeSuite) SetupSuite() {
 		UseCSI:                    true,
 		EnableAdmissionController: true,
 		UseCrashPruner:            true,
+		EnableVolumeReplication:   true,
 		RookVersion:               installer.LocalBuildTag,
 		CephVersion:               installer.PacificVersion,
 	}
 	s.settings.ApplyEnvVars()
 	s.installer, s.k8sh = StartTestCluster(s.T, s.settings, smokeSuiteMinimalTestVersion)
-	if s.k8sh.VersionAtLeast("v1.16.0") {
-		s.settings.EnableVolumeReplication = true
-	}
 	s.helper = clients.CreateTestClient(s.k8sh, s.installer.Manifests)
 }
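With this patch, the volume replication CRDs come from the csi-addons manifests rather than from Rook's generated crds.yaml. As a quick sanity check after applying them (a sketch, not part of the patch; the CRD names follow from the group and plural in the manifest filenames above):

```console
kubectl get crd volumereplications.replication.storage.openshift.io
kubectl get crd volumereplicationclasses.replication.storage.openshift.io
```

If either CRD is missing while `CSI_ENABLE_VOLUME_REPLICATION` is set to `"true"`, the volume replication controller has no APIs to watch — the situation the installer's `CreateVolumeReplicationCRDs` helper above avoids in the integration tests.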