diff --git a/.github/workflows/canary-integration-test.yml b/.github/workflows/canary-integration-test.yml
index f5847b212326..d16741144c3f 100644
--- a/.github/workflows/canary-integration-test.yml
+++ b/.github/workflows/canary-integration-test.yml
@@ -140,12 +140,12 @@ jobs:
       - name: deploy cluster
         run: |
-          kubectl create -f cluster/examples/kubernetes/ceph/operator.yaml
+          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml
           yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].encrypted" false
           yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].count" 2
           yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].volumeClaimTemplates[0].spec.resources.requests.storage" 6Gi
           kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml
-          kubectl create -f cluster/examples/kubernetes/ceph/toolbox.yaml
+          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml

       - name: wait for prepare pod
         run: |
@@ -219,11 +219,11 @@ jobs:
       - name: deploy cluster
         run: |
-          kubectl create -f cluster/examples/kubernetes/ceph/operator.yaml
+          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml
           yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].encrypted" false
           cat tests/manifests/test-on-pvc-db.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml
           kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml
-          kubectl create -f cluster/examples/kubernetes/ceph/toolbox.yaml
+          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml

       - name: wait for prepare pod
         run: tests/scripts/github-action-helper.sh wait_for_prepare_pod
@@ -287,12 +287,12 @@ jobs:
       - name: deploy rook
         run: |
-          kubectl create -f cluster/examples/kubernetes/ceph/operator.yaml
+          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml
           yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].encrypted" false
           cat tests/manifests/test-on-pvc-db.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml
           cat tests/manifests/test-on-pvc-wal.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml
           kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml
-          kubectl create -f cluster/examples/kubernetes/ceph/toolbox.yaml
+          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml

       - name: wait for prepare pod
         run: tests/scripts/github-action-helper.sh wait_for_prepare_pod
@@ -357,11 +357,11 @@ jobs:
       - name: deploy cluster
         run: |
-          kubectl create -f cluster/examples/kubernetes/ceph/operator.yaml
+          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml
           yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].count" 2
           yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].volumeClaimTemplates[0].spec.resources.requests.storage" 6Gi
           kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml
-          kubectl create -f cluster/examples/kubernetes/ceph/toolbox.yaml
+          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml

       - name: wait for prepare pod
         run: tests/scripts/github-action-helper.sh wait_for_prepare_pod
@@ -428,10 +428,10 @@ jobs:
       - name: deploy cluster
         run: |
-          kubectl create -f cluster/examples/kubernetes/ceph/operator.yaml
+          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml
           cat tests/manifests/test-on-pvc-db.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml
           kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml
-          kubectl create -f cluster/examples/kubernetes/ceph/toolbox.yaml
+          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml

       - name: wait for prepare pod
         run: tests/scripts/github-action-helper.sh wait_for_prepare_pod
@@ -498,11 +498,11 @@ jobs:
       - name: deploy rook
         run: |
-          kubectl create -f cluster/examples/kubernetes/ceph/operator.yaml
+          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml
           cat tests/manifests/test-on-pvc-db.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml
           cat tests/manifests/test-on-pvc-wal.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml
           kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml
-          kubectl create -f cluster/examples/kubernetes/ceph/toolbox.yaml
+          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml

       - name: wait for prepare pod
         run: tests/scripts/github-action-helper.sh wait_for_prepare_pod
@@ -572,7 +572,7 @@ jobs:
       - name: deploy cluster
         run: |
-          kubectl create -f cluster/examples/kubernetes/ceph/operator.yaml
+          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml
           cat tests/manifests/test-kms-vault.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml
           yq merge --inplace --arrays append tests/manifests/test-cluster-on-pvc-encrypted.yaml tests/manifests/test-kms-vault-spec.yaml
           yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].count" 2
@@ -581,7 +581,7 @@ jobs:
           yq merge --inplace --arrays append tests/manifests/test-object.yaml tests/manifests/test-kms-vault-spec.yaml
           sed -i 's/ver1/ver2/g' tests/manifests/test-object.yaml
           kubectl create -f tests/manifests/test-object.yaml
-          kubectl create -f cluster/examples/kubernetes/ceph/toolbox.yaml
+          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml

       - name: wait for prepare pod
         run: tests/scripts/github-action-helper.sh wait_for_prepare_pod
@@ -653,10 +653,10 @@ jobs:
       - name: deploy cluster
         run: |
-          kubectl create -f cluster/examples/kubernetes/ceph/operator.yaml
+          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml
           yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].encrypted" false
           kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml
-          kubectl create -f cluster/examples/kubernetes/ceph/toolbox.yaml
+          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml

       - name: wait for prepare pod
         run: tests/scripts/github-action-helper.sh wait_for_prepare_pod
diff --git a/Documentation/ceph-monitoring.md b/Documentation/ceph-monitoring.md
index 16988f7877df..83593fc99fd8 100644
--- a/Documentation/ceph-monitoring.md
+++ b/Documentation/ceph-monitoring.md
@@ -38,7 +38,7 @@ With the Prometheus operator running, we can create a service monitor that will
 From the root of your locally cloned Rook repo, go the monitoring directory:

 ```console
-$ git clone --single-branch --branch v1.7.3 https://github.com/rook/rook.git
+$ git clone --single-branch --branch v1.7.4 https://github.com/rook/rook.git
 cd rook/cluster/examples/kubernetes/ceph/monitoring
 ```

diff --git a/Documentation/ceph-toolbox.md b/Documentation/ceph-toolbox.md
index b78ddbddcfc1..878823c32a8a 100644
--- a/Documentation/ceph-toolbox.md
+++ b/Documentation/ceph-toolbox.md
@@ -43,7 +43,7 @@ spec:
       dnsPolicy: ClusterFirstWithHostNet
       containers:
         - name: rook-ceph-tools
-          image: rook/ceph:v1.7.3
+          image: rook/ceph:v1.7.4
           command: ["/tini"]
           args: ["-g", "--", "/usr/local/bin/toolbox.sh"]
           imagePullPolicy: IfNotPresent
@@ -133,7 +133,7 @@ spec:
     spec:
       initContainers:
         - name: config-init
-          image: rook/ceph:v1.7.3
+          image: rook/ceph:v1.7.4
           command: ["/usr/local/bin/toolbox.sh"]
           args: ["--skip-watch"]
           imagePullPolicy: IfNotPresent
@@ -155,7 +155,7 @@ spec:
           mountPath: /etc/rook
       containers:
         - name: script
-          image: rook/ceph:v1.7.3
+          image: rook/ceph:v1.7.4
           volumeMounts:
             - mountPath: /etc/ceph
               name: ceph-config
diff --git a/Documentation/ceph-upgrade.md b/Documentation/ceph-upgrade.md
index 16599f5b85ac..d8abe89cccf5 100644
--- a/Documentation/ceph-upgrade.md
+++ b/Documentation/ceph-upgrade.md
@@ -53,12 +53,12 @@ With this upgrade guide, there are a few notes to consider:
 Unless otherwise noted due to extenuating requirements, upgrades from one patch release of Rook to
 another are as simple as updating the common resources and the image of the Rook operator. For
-example, when Rook v1.7.3 is released, the process of updating from v1.7.0 is as simple as running
+example, when Rook v1.7.4 is released, the process of updating from v1.7.0 is as simple as running
 the following:

 First get the latest common resources manifests that contain the latest changes for Rook v1.7.

 ```sh
-git clone --single-branch --depth=1 --branch v1.7.3 https://github.com/rook/rook.git
+git clone --single-branch --depth=1 --branch v1.7.4 https://github.com/rook/rook.git
 cd rook/cluster/examples/kubernetes/ceph
 ```

@@ -69,7 +69,7 @@ section for instructions on how to change the default namespaces in `common.yaml
 Then apply the latest changes from v1.7 and update the Rook Operator image.

 ```console
 kubectl apply -f common.yaml -f crds.yaml
-kubectl -n rook-ceph set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.7.3
+kubectl -n rook-ceph set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.7.4
 ```

 As exemplified above, it is a good practice to update Rook-Ceph common resources from the example
@@ -249,7 +249,7 @@ Any pod that is using a Rook volume should also remain healthy:
 ## Rook Operator Upgrade Process

 In the examples given in this guide, we will be upgrading a live Rook cluster running `v1.6.8` to
-the version `v1.7.3`. This upgrade should work from any official patch release of Rook v1.6 to any
+the version `v1.7.4`. This upgrade should work from any official patch release of Rook v1.6 to any
 official patch release of v1.7.

 **Rook release from `master` are expressly unsupported.** It is strongly recommended that you use
@@ -279,7 +279,7 @@ needed by the Operator. Also update the Custom Resource Definitions (CRDs).
 First get the latest common resources manifests that contain the latest changes.

 ```sh
-git clone --single-branch --depth=1 --branch v1.7.3 https://github.com/rook/rook.git
+git clone --single-branch --depth=1 --branch v1.7.4 https://github.com/rook/rook.git
 cd rook/cluster/examples/kubernetes/ceph
 ```

@@ -325,7 +325,7 @@ The largest portion of the upgrade is triggered when the operator's image is updated
 When the operator is updated, it will proceed to update all of the Ceph daemons.

 ```sh
-kubectl -n $ROOK_OPERATOR_NAMESPACE set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.7.3
+kubectl -n $ROOK_OPERATOR_NAMESPACE set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.7.4
 ```

 ### **4. Wait for the upgrade to complete**
@@ -341,16 +341,16 @@ watch --exec kubectl -n $ROOK_CLUSTER_NAMESPACE get deployments -l rook_cluster=
 ```

 As an example, this cluster is midway through updating the OSDs. When all deployments report `1/1/1`
-availability and `rook-version=v1.7.3`, the Ceph cluster's core components are fully updated.
+availability and `rook-version=v1.7.4`, the Ceph cluster's core components are fully updated.

 >```
 >Every 2.0s: kubectl -n rook-ceph get deployment -o j...
 >
->rook-ceph-mgr-a         req/upd/avl: 1/1/1      rook-version=v1.7.3
->rook-ceph-mon-a         req/upd/avl: 1/1/1      rook-version=v1.7.3
->rook-ceph-mon-b         req/upd/avl: 1/1/1      rook-version=v1.7.3
->rook-ceph-mon-c         req/upd/avl: 1/1/1      rook-version=v1.7.3
->rook-ceph-osd-0         req/upd/avl: 1//        rook-version=v1.7.3
+>rook-ceph-mgr-a         req/upd/avl: 1/1/1      rook-version=v1.7.4
+>rook-ceph-mon-a         req/upd/avl: 1/1/1      rook-version=v1.7.4
+>rook-ceph-mon-b         req/upd/avl: 1/1/1      rook-version=v1.7.4
+>rook-ceph-mon-c         req/upd/avl: 1/1/1      rook-version=v1.7.4
+>rook-ceph-osd-0         req/upd/avl: 1//        rook-version=v1.7.4
 >rook-ceph-osd-1         req/upd/avl: 1/1/1      rook-version=v1.6.8
 >rook-ceph-osd-2         req/upd/avl: 1/1/1      rook-version=v1.6.8
 >```
@@ -362,14 +362,14 @@ An easy check to see if the upgrade is totally finished is to check that there i
 # kubectl -n $ROOK_CLUSTER_NAMESPACE get deployment -l rook_cluster=$ROOK_CLUSTER_NAMESPACE -o jsonpath='{range .items[*]}{"rook-version="}{.metadata.labels.rook-version}{"\n"}{end}' | sort | uniq
   This cluster is not yet finished:
     rook-version=v1.6.8
-    rook-version=v1.7.3
+    rook-version=v1.7.4
   This cluster is finished:
-    rook-version=v1.7.3
+    rook-version=v1.7.4
 ```

 ### **5. Verify the updated cluster**

-At this point, your Rook operator should be running version `rook/ceph:v1.7.3`.
+At this point, your Rook operator should be running version `rook/ceph:v1.7.4`.

 Verify the Ceph cluster's health using the [health verification section](#health-verification).
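The `sort | uniq` check above also lends itself to automation. Below is a minimal polling sketch, not part of this patch, that blocks until every deployment converges on the new version; it assumes the same `rook_cluster` label and `ROOK_CLUSTER_NAMESPACE` conventions used in the upgrade guide:

```sh
#!/usr/bin/env bash
# Sketch: wait until every Ceph daemon deployment reports the target rook-version.
TARGET="v1.7.4"
NS="${ROOK_CLUSTER_NAMESPACE:-rook-ceph}"

versions() {
  # One rook-version label per deployment, collapsed to the unique set.
  kubectl -n "$NS" get deployment -l "rook_cluster=$NS" \
    -o jsonpath='{range .items[*]}{.metadata.labels.rook-version}{"\n"}{end}' | sort -u
}

until [ "$(versions)" = "$TARGET" ]; do
  echo "still upgrading; versions present: $(versions | tr '\n' ' ')"
  sleep 10
done
echo "all Ceph daemon deployments are on $TARGET"
```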
diff --git a/cluster/examples/kubernetes/ceph/direct-mount.yaml b/cluster/examples/kubernetes/ceph/direct-mount.yaml
index 384125860594..09193d9621c5 100644
--- a/cluster/examples/kubernetes/ceph/direct-mount.yaml
+++ b/cluster/examples/kubernetes/ceph/direct-mount.yaml
@@ -18,7 +18,7 @@ spec:
       dnsPolicy: ClusterFirstWithHostNet
       containers:
         - name: rook-direct-mount
-          image: rook/ceph:v1.7.3
+          image: rook/ceph:v1.7.4
           command: ["/tini"]
           args: ["-g", "--", "/usr/local/bin/toolbox.sh"]
           imagePullPolicy: IfNotPresent
diff --git a/cluster/examples/kubernetes/ceph/images.txt b/cluster/examples/kubernetes/ceph/images.txt
index bd13c4be59ab..5e0361b6307d 100644
--- a/cluster/examples/kubernetes/ceph/images.txt
+++ b/cluster/examples/kubernetes/ceph/images.txt
@@ -1,4 +1,4 @@
- rook/ceph:v1.7.3
+ rook/ceph:v1.7.4
  quay.io/ceph/ceph:v16.2.6
  quay.io/cephcsi/cephcsi:v3.4.0
  k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.2.0
diff --git a/cluster/examples/kubernetes/ceph/operator-openshift.yaml b/cluster/examples/kubernetes/ceph/operator-openshift.yaml
index d2bcb0615721..692f19a91b71 100644
--- a/cluster/examples/kubernetes/ceph/operator-openshift.yaml
+++ b/cluster/examples/kubernetes/ceph/operator-openshift.yaml
@@ -444,7 +444,7 @@ spec:
       serviceAccountName: rook-ceph-system
       containers:
         - name: rook-ceph-operator
-          image: rook/ceph:v1.7.3
+          image: rook/ceph:v1.7.4
           args: ["ceph", "operator"]
           volumeMounts:
             - mountPath: /var/lib/rook
diff --git a/cluster/examples/kubernetes/ceph/operator.yaml b/cluster/examples/kubernetes/ceph/operator.yaml
index 16b556831253..2f7f17556171 100644
--- a/cluster/examples/kubernetes/ceph/operator.yaml
+++ b/cluster/examples/kubernetes/ceph/operator.yaml
@@ -367,7 +367,7 @@ spec:
       serviceAccountName: rook-ceph-system
       containers:
         - name: rook-ceph-operator
-          image: rook/ceph:v1.7.3
+          image: rook/ceph:v1.7.4
           args: ["ceph", "operator"]
           volumeMounts:
             - mountPath: /var/lib/rook
diff --git a/cluster/examples/kubernetes/ceph/osd-purge.yaml b/cluster/examples/kubernetes/ceph/osd-purge.yaml
index ad3d2b76464a..279cfcde8fec 100644
--- a/cluster/examples/kubernetes/ceph/osd-purge.yaml
+++ b/cluster/examples/kubernetes/ceph/osd-purge.yaml
@@ -25,7 +25,7 @@ spec:
       serviceAccountName: rook-ceph-purge-osd
       containers:
         - name: osd-removal
-          image: rook/ceph:v1.7.3
+          image: rook/ceph:v1.7.4
           # TODO: Insert the OSD ID in the last parameter that is to be removed
           # The OSD IDs are a comma-separated list. For example: "0" or "0,2".
           # If you want to preserve the OSD PVCs, set `--preserve-pvc true`.
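Since `images.txt` (bumped above) enumerates every image the release pulls, it can also double as a pre-pull list when warming a test or air-gapped node. A small sketch, not part of this patch and assuming a Docker-compatible CLI on the node:

```sh
# Pull every image named in the release manifest list; xargs tokenizes on
# whitespace, so the leading spaces in images.txt entries are harmless.
xargs -n1 docker pull < cluster/examples/kubernetes/ceph/images.txt
```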
diff --git a/cluster/examples/kubernetes/ceph/toolbox-job.yaml b/cluster/examples/kubernetes/ceph/toolbox-job.yaml
index 948cf988dbdf..9c1b11a89c3c 100644
--- a/cluster/examples/kubernetes/ceph/toolbox-job.yaml
+++ b/cluster/examples/kubernetes/ceph/toolbox-job.yaml
@@ -10,7 +10,7 @@ spec:
     spec:
      initContainers:
         - name: config-init
-          image: rook/ceph:v1.7.3
+          image: rook/ceph:v1.7.4
           command: ["/usr/local/bin/toolbox.sh"]
           args: ["--skip-watch"]
           imagePullPolicy: IfNotPresent
@@ -32,7 +32,7 @@ spec:
           mountPath: /etc/rook
       containers:
         - name: script
-          image: rook/ceph:v1.7.3
+          image: rook/ceph:v1.7.4
           volumeMounts:
             - mountPath: /etc/ceph
               name: ceph-config
diff --git a/cluster/examples/kubernetes/ceph/toolbox.yaml b/cluster/examples/kubernetes/ceph/toolbox.yaml
index 79efb291d140..50bbed9a176d 100644
--- a/cluster/examples/kubernetes/ceph/toolbox.yaml
+++ b/cluster/examples/kubernetes/ceph/toolbox.yaml
@@ -18,7 +18,7 @@ spec:
       dnsPolicy: ClusterFirstWithHostNet
       containers:
         - name: rook-ceph-tools
-          image: rook/ceph:v1.7.3
+          image: rook/ceph:v1.7.4
           command: ["/tini"]
           args: ["-g", "--", "/usr/local/bin/toolbox.sh"]
           imagePullPolicy: IfNotPresent
diff --git a/tests/integration/ceph_base_object_test.go b/tests/integration/ceph_base_object_test.go
index 7f661630fb9b..e8b601c23f44 100644
--- a/tests/integration/ceph_base_object_test.go
+++ b/tests/integration/ceph_base_object_test.go
@@ -154,7 +154,7 @@ func checkCephObjectUser(
 		assert.Equal(s.T(), k8sutil.ReadyStatus, phase)
 	}
 	if checkQuotaAndCaps {
-		// following fields in CephObjectStoreUser CRD doesn't exist before Rook v1.7.3
+		// following fields in CephObjectStoreUser CRD doesn't exist before Rook v1.7.4
 		maxObjectInt, err := strconv.Atoi(maxObject)
 		assert.Nil(s.T(), err)
 		maxSizeInt, err := strconv.Atoi(maxSize)
diff --git a/tests/scripts/github-action-helper.sh b/tests/scripts/github-action-helper.sh
index d8f0beee1ce4..3f15f80867ec 100755
--- a/tests/scripts/github-action-helper.sh
+++ b/tests/scripts/github-action-helper.sh
@@ -143,9 +143,14 @@ function create_cluster_prerequisites() {
   kubectl create -f crds.yaml -f common.yaml
 }

+function deploy_manifest_with_local_build() {
+  sed -i "s|image: rook/ceph:v1.7.4|image: rook/ceph:local-build|g" $1
+  kubectl create -f $1
+}
+
 function deploy_cluster() {
   cd cluster/examples/kubernetes/ceph
-  kubectl create -f operator.yaml
+  deploy_manifest_with_local_build operator.yaml
   sed -i "s|#deviceFilter:|deviceFilter: ${BLOCK/\/dev\/}|g" cluster-test.yaml
   kubectl create -f cluster-test.yaml
   kubectl create -f object-test.yaml
@@ -154,7 +159,7 @@ function deploy_cluster() {
   kubectl create -f rbdmirror.yaml
   kubectl create -f filesystem-mirror.yaml
   kubectl create -f nfs-test.yaml
-  kubectl create -f toolbox.yaml
+  deploy_manifest_with_local_build toolbox.yaml
 }

 function wait_for_prepare_pod() {
@@ -252,7 +257,9 @@ selected_function="$1"
 if [ "$selected_function" = "generate_tls_config" ]; then
   $selected_function $2 $3 $4 $5
 elif [ "$selected_function" = "wait_for_ceph_to_be_ready" ]; then
-  $selected_function $2 $3
+  $selected_function $2 $3
+elif [ "$selected_function" = "deploy_manifest_with_local_build" ]; then
+  $selected_function $2
 else
   $selected_function
 fi
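A side effect of `deploy_manifest_with_local_build` as introduced here is that its `sed` expression pins the exact release tag, which is why the helper itself has to be touched on every version bump (as this patch does). A version-agnostic variant is sketched below; it is an illustration under that assumption, not what the patch implements, and it relies on GNU sed's `-E` flag as used on the CI runners:

```sh
function deploy_manifest_with_local_build() {
  local manifest="$1"
  # Swap any pinned rook/ceph release tag (vX.Y.Z) for the image built in this CI run.
  sed -i -E 's|image: rook/ceph:v[0-9]+\.[0-9]+\.[0-9]+|image: rook/ceph:local-build|g' "${manifest}"
  kubectl create -f "${manifest}"
}
```

It would be invoked exactly as in the workflow changes above, e.g. `tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml`.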