From d27303a074a1b4612095b1915fceb563bd03ebc5 Mon Sep 17 00:00:00 2001
From: Blaine Gardner
Date: Thu, 16 Dec 2021 12:36:55 -0700
Subject: [PATCH] build: update examples and manifests for v1.8.1

Signed-off-by: Blaine Gardner
---
 .github/workflows/canary-integration-test.yml |  2 +-
 Documentation/ceph-monitoring.md              |  2 +-
 Documentation/ceph-upgrade.md                 | 24 +++++++++----------
 Documentation/quickstart.md                   |  2 +-
 deploy/examples/direct-mount.yaml             |  2 +-
 deploy/examples/images.txt                    |  2 +-
 deploy/examples/operator-openshift.yaml       |  2 +-
 deploy/examples/operator.yaml                 |  2 +-
 deploy/examples/osd-purge.yaml                |  2 +-
 deploy/examples/toolbox-job.yaml              |  4 ++--
 deploy/examples/toolbox.yaml                  |  2 +-
 11 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/.github/workflows/canary-integration-test.yml b/.github/workflows/canary-integration-test.yml
index a72f31b84ffb..0cc6fefaba3e 100644
--- a/.github/workflows/canary-integration-test.yml
+++ b/.github/workflows/canary-integration-test.yml
@@ -76,7 +76,7 @@ jobs:
           sed -i 's//1/' deploy/examples/osd-purge.yaml
           # the CI must force the deletion since we use replica 1 on 2 OSDs
           sed -i 's/false/true/' deploy/examples/osd-purge.yaml
-          sed -i 's|rook/ceph:master|rook/ceph:local-build|' deploy/examples/osd-purge.yaml
+          sed -i 's|rook/ceph:v1.8.1|rook/ceph:local-build|' deploy/examples/osd-purge.yaml
           kubectl -n rook-ceph create -f deploy/examples/osd-purge.yaml
           toolbox=$(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[*].metadata.name}')
           kubectl -n rook-ceph exec $toolbox -- ceph status
diff --git a/Documentation/ceph-monitoring.md b/Documentation/ceph-monitoring.md
index e6bce4bf79d1..a16fb2f1508a 100644
--- a/Documentation/ceph-monitoring.md
+++ b/Documentation/ceph-monitoring.md
@@ -38,7 +38,7 @@ With the Prometheus operator running, we can create a service monitor that will
 From the root of your locally cloned Rook repo, go to the monitoring directory:

 ```console
-$ git clone --single-branch --branch v1.8.0 https://github.com/rook/rook.git
+$ git clone --single-branch --branch v1.8.1 https://github.com/rook/rook.git
 cd rook/deploy/examples/monitoring
 ```
diff --git a/Documentation/ceph-upgrade.md b/Documentation/ceph-upgrade.md
index cb903ea20568..36fe5baf5685 100644
--- a/Documentation/ceph-upgrade.md
+++ b/Documentation/ceph-upgrade.md
@@ -266,7 +266,7 @@ Any pod that is using a Rook volume should also remain healthy:
 ## Rook Operator Upgrade Process

 In the examples given in this guide, we will be upgrading a live Rook cluster running `v1.7.8` to
-the version `v1.8.0`. This upgrade should work from any official patch release of Rook v1.7 to any
+the version `v1.8.1`. This upgrade should work from any official patch release of Rook v1.7 to any
 official patch release of v1.8.

 **Rook releases from `master` are expressly unsupported.** It is strongly recommended that you use
@@ -291,7 +291,7 @@ by the Operator. Also update the Custom Resource Definitions (CRDs).
 Get the latest common resources manifests that contain the latest changes.

 ```sh
-git clone --single-branch --depth=1 --branch v1.8.0 https://github.com/rook/rook.git
+git clone --single-branch --depth=1 --branch v1.8.1 https://github.com/rook/rook.git
 cd rook/deploy/examples
 ```
@@ -343,7 +343,7 @@ The largest portion of the upgrade is triggered when the operator's image is upd
 When the operator is updated, it will proceed to update all of the Ceph daemons.

 ```sh
-kubectl -n $ROOK_OPERATOR_NAMESPACE set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.8.0
+kubectl -n $ROOK_OPERATOR_NAMESPACE set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.8.1
 ```

 #### Admission controller
@@ -377,16 +377,16 @@ watch --exec kubectl -n $ROOK_CLUSTER_NAMESPACE get deployments -l rook_cluster=
 ```

 As an example, this cluster is midway through updating the OSDs. When all deployments report `1/1/1`
-availability and `rook-version=v1.8.0`, the Ceph cluster's core components are fully updated.
+availability and `rook-version=v1.8.1`, the Ceph cluster's core components are fully updated.

 >```
 >Every 2.0s: kubectl -n rook-ceph get deployment -o j...
 >
->rook-ceph-mgr-a req/upd/avl: 1/1/1 rook-version=v1.8.0
->rook-ceph-mon-a req/upd/avl: 1/1/1 rook-version=v1.8.0
->rook-ceph-mon-b req/upd/avl: 1/1/1 rook-version=v1.8.0
->rook-ceph-mon-c req/upd/avl: 1/1/1 rook-version=v1.8.0
->rook-ceph-osd-0 req/upd/avl: 1// rook-version=v1.8.0
+>rook-ceph-mgr-a req/upd/avl: 1/1/1 rook-version=v1.8.1
+>rook-ceph-mon-a req/upd/avl: 1/1/1 rook-version=v1.8.1
+>rook-ceph-mon-b req/upd/avl: 1/1/1 rook-version=v1.8.1
+>rook-ceph-mon-c req/upd/avl: 1/1/1 rook-version=v1.8.1
+>rook-ceph-osd-0 req/upd/avl: 1// rook-version=v1.8.1
 >rook-ceph-osd-1 req/upd/avl: 1/1/1 rook-version=v1.7.8
 >rook-ceph-osd-2 req/upd/avl: 1/1/1 rook-version=v1.7.8
 >```
@@ -398,14 +398,14 @@ An easy check to see if the upgrade is totally finished is to check that there i
 # kubectl -n $ROOK_CLUSTER_NAMESPACE get deployment -l rook_cluster=$ROOK_CLUSTER_NAMESPACE -o jsonpath='{range .items[*]}{"rook-version="}{.metadata.labels.rook-version}{"\n"}{end}' | sort | uniq
   This cluster is not yet finished:
     rook-version=v1.7.8
-    rook-version=v1.8.0
+    rook-version=v1.8.1

   This cluster is finished:
-    rook-version=v1.8.0
+    rook-version=v1.8.1
 ```

 ### **5. Verify the updated cluster**

-At this point, your Rook operator should be running version `rook/ceph:v1.8.0`.
+At this point, your Rook operator should be running version `rook/ceph:v1.8.1`.

 Verify the Ceph cluster's health using the [health verification section](#health-verification).
diff --git a/Documentation/quickstart.md b/Documentation/quickstart.md
index 262ffd9e0b7e..21f2fc7ebc85 100644
--- a/Documentation/quickstart.md
+++ b/Documentation/quickstart.md
@@ -34,7 +34,7 @@ In order to configure the Ceph storage cluster, at least one of these local stor
 A simple Rook cluster can be created with the following kubectl commands and [example manifests](https://github.com/rook/rook/blob/{{ branchName }}/deploy/examples).

 ```console
-$ git clone --single-branch --branch v1.8.0 https://github.com/rook/rook.git
+$ git clone --single-branch --branch v1.8.1 https://github.com/rook/rook.git
 cd rook/deploy/examples
 kubectl create -f crds.yaml -f common.yaml -f operator.yaml
 kubectl create -f cluster.yaml
 ```
diff --git a/deploy/examples/direct-mount.yaml b/deploy/examples/direct-mount.yaml
index 964f2531be25..f8bf3316b126 100644
--- a/deploy/examples/direct-mount.yaml
+++ b/deploy/examples/direct-mount.yaml
@@ -18,7 +18,7 @@ spec:
       dnsPolicy: ClusterFirstWithHostNet
       containers:
         - name: rook-direct-mount
-          image: rook/ceph:v1.8.0
+          image: rook/ceph:v1.8.1
           command: ["/bin/bash"]
           args: ["-m", "-c", "/usr/local/bin/toolbox.sh"]
           imagePullPolicy: IfNotPresent
diff --git a/deploy/examples/images.txt b/deploy/examples/images.txt
index 9a2e3dd9370d..adf83cd30bdc 100644
--- a/deploy/examples/images.txt
+++ b/deploy/examples/images.txt
@@ -6,4 +6,4 @@
 quay.io/ceph/ceph:v16.2.7
 quay.io/cephcsi/cephcsi:v3.4.0
 quay.io/csiaddons/volumereplication-operator:v0.1.0
- rook/ceph:v1.8.0
+ rook/ceph:v1.8.1
diff --git a/deploy/examples/operator-openshift.yaml b/deploy/examples/operator-openshift.yaml
index 13b47a7c762d..9f215b54ac56 100644
--- a/deploy/examples/operator-openshift.yaml
+++ b/deploy/examples/operator-openshift.yaml
@@ -445,7 +445,7 @@ spec:
       serviceAccountName: rook-ceph-system
       containers:
         - name: rook-ceph-operator
-          image: rook/ceph:v1.8.0
+          image: rook/ceph:v1.8.1
           args: ["ceph", "operator"]
           securityContext:
             runAsNonRoot: true
diff --git a/deploy/examples/operator.yaml b/deploy/examples/operator.yaml
index 7e133e3d9a26..792234242705 100644
--- a/deploy/examples/operator.yaml
+++ b/deploy/examples/operator.yaml
@@ -362,7 +362,7 @@ spec:
       serviceAccountName: rook-ceph-system
       containers:
         - name: rook-ceph-operator
-          image: rook/ceph:v1.8.0
+          image: rook/ceph:v1.8.1
           args: ["ceph", "operator"]
           securityContext:
             runAsNonRoot: true
diff --git a/deploy/examples/osd-purge.yaml b/deploy/examples/osd-purge.yaml
index 5e30ed414db0..24f6ae7710e2 100644
--- a/deploy/examples/osd-purge.yaml
+++ b/deploy/examples/osd-purge.yaml
@@ -25,7 +25,7 @@ spec:
       serviceAccountName: rook-ceph-purge-osd
       containers:
         - name: osd-removal
-          image: rook/ceph:v1.8.0
+          image: rook/ceph:v1.8.1
           # TODO: Insert the OSD ID in the last parameter that is to be removed
           # The OSD IDs are a comma-separated list. For example: "0" or "0,2".
           # If you want to preserve the OSD PVCs, set `--preserve-pvc true`.
diff --git a/deploy/examples/toolbox-job.yaml b/deploy/examples/toolbox-job.yaml
index d840946c58ed..755cac8b6bbe 100644
--- a/deploy/examples/toolbox-job.yaml
+++ b/deploy/examples/toolbox-job.yaml
@@ -10,7 +10,7 @@ spec:
     spec:
       initContainers:
         - name: config-init
-          image: rook/ceph:v1.8.0
+          image: rook/ceph:v1.8.1
           command: ["/usr/local/bin/toolbox.sh"]
           args: ["--skip-watch"]
           imagePullPolicy: IfNotPresent
@@ -32,7 +32,7 @@ spec:
           mountPath: /etc/rook
       containers:
         - name: script
-          image: rook/ceph:v1.8.0
+          image: rook/ceph:v1.8.1
           volumeMounts:
             - mountPath: /etc/ceph
               name: ceph-config
diff --git a/deploy/examples/toolbox.yaml b/deploy/examples/toolbox.yaml
index bfc9a4df06d2..fab9b74dd75c 100644
--- a/deploy/examples/toolbox.yaml
+++ b/deploy/examples/toolbox.yaml
@@ -18,7 +18,7 @@ spec:
       dnsPolicy: ClusterFirstWithHostNet
      containers:
         - name: rook-ceph-tools
-          image: rook/ceph:v1.8.0
+          image: rook/ceph:v1.8.1
           command: ["/bin/bash"]
           args: ["-m", "-c", "/usr/local/bin/toolbox.sh"]
           imagePullPolicy: IfNotPresent
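
A quick sanity check for a release-bump patch like this one (not part of the commit itself) is to grep the tree for leftovers of the old tag once the patch is applied. The sketch below is hypothetical: it assumes a local checkout of rook/rook with this patch applied, and it searches for the full image reference `rook/ceph:v1.8.0` rather than the bare string `v1.8.0`, since older version strings can legitimately remain elsewhere (for example, `v1.7.8` in the upgrade guide's examples).

```console
# Search the paths touched by this patch for the old operator image tag.
# No output means the bump is complete; any hit needs review.
$ git grep -n "rook/ceph:v1.8.0" -- deploy/examples Documentation .github/workflows

# Conversely, each example manifest should now carry the new tag.
# (Expected file list derived from this patch's diffstat.)
$ git grep -l "rook/ceph:v1.8.1" -- deploy/examples
deploy/examples/direct-mount.yaml
deploy/examples/images.txt
deploy/examples/operator-openshift.yaml
deploy/examples/operator.yaml
deploy/examples/osd-purge.yaml
deploy/examples/toolbox-job.yaml
deploy/examples/toolbox.yaml
```

Note that the canary workflow is the one place where `rook/ceph:v1.8.1` is deliberately rewritten to `rook/ceph:local-build` at CI time, which is why its sed pattern must track the release tag in this commit.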