diff --git a/Documentation/Getting-Started/quickstart.md b/Documentation/Getting-Started/quickstart.md
index 9f36a5904d73..3017f21a8d0c 100644
--- a/Documentation/Getting-Started/quickstart.md
+++ b/Documentation/Getting-Started/quickstart.md
@@ -36,7 +36,7 @@ To configure the Ceph storage cluster, at least one of these local storage optio
 A simple Rook cluster is created for Kubernetes with the following `kubectl` commands and [example manifests](https://github.com/rook/rook/blob/master/deploy/examples).

 ```console
-$ git clone --single-branch --branch v1.14.0 https://github.com/rook/rook.git
+$ git clone --single-branch --branch v1.14.1 https://github.com/rook/rook.git
 cd rook/deploy/examples
 kubectl create -f crds.yaml -f common.yaml -f operator.yaml
 kubectl create -f cluster.yaml
diff --git a/Documentation/Storage-Configuration/Monitoring/ceph-monitoring.md b/Documentation/Storage-Configuration/Monitoring/ceph-monitoring.md
index 9c34cbe3ff8d..723c8451a41d 100644
--- a/Documentation/Storage-Configuration/Monitoring/ceph-monitoring.md
+++ b/Documentation/Storage-Configuration/Monitoring/ceph-monitoring.md
@@ -47,7 +47,7 @@ There are two sources for metrics collection:
 From the root of your locally cloned Rook repo, go the monitoring directory:

 ```console
-$ git clone --single-branch --branch v1.14.0 https://github.com/rook/rook.git
+$ git clone --single-branch --branch v1.14.1 https://github.com/rook/rook.git
 cd rook/deploy/examples/monitoring
 ```

diff --git a/Documentation/Upgrade/rook-upgrade.md b/Documentation/Upgrade/rook-upgrade.md
index ae858923ac51..159b888085f0 100644
--- a/Documentation/Upgrade/rook-upgrade.md
+++ b/Documentation/Upgrade/rook-upgrade.md
@@ -133,8 +133,8 @@ In order to successfully upgrade a Rook cluster, the following prerequisites mus

 ## Rook Operator Upgrade

-The examples given in this guide upgrade a live Rook cluster running `v1.13.7` to
-the version `v1.14.0`. This upgrade should work from any official patch release of Rook v1.13 to any
+The examples given in this guide upgrade a live Rook cluster running `v1.13.8` to
+the version `v1.14.1`. This upgrade should work from any official patch release of Rook v1.13 to any
 official patch release of v1.14.

 Let's get started!
@@ -161,7 +161,7 @@ by the Operator. Also update the Custom Resource Definitions (CRDs).
 Get the latest common resources manifests that contain the latest changes.

 ```console
-git clone --single-branch --depth=1 --branch v1.14.0 https://github.com/rook/rook.git
+git clone --single-branch --depth=1 --branch v1.14.1 https://github.com/rook/rook.git
 cd rook/deploy/examples
 ```

@@ -200,7 +200,7 @@ The largest portion of the upgrade is triggered when the operator's image is upd
 When the operator is updated, it will proceed to update all of the Ceph daemons.

 ```console
-kubectl -n $ROOK_OPERATOR_NAMESPACE set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.14.0
+kubectl -n $ROOK_OPERATOR_NAMESPACE set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.14.1
 ```

 ### **3. Update Ceph CSI**
@@ -230,18 +230,18 @@ watch --exec kubectl -n $ROOK_CLUSTER_NAMESPACE get deployments -l rook_cluster=
 ```

 As an example, this cluster is midway through updating the OSDs. When all deployments report `1/1/1`
-availability and `rook-version=v1.14.0`, the Ceph cluster's core components are fully updated.
+availability and `rook-version=v1.14.1`, the Ceph cluster's core components are fully updated.

 ```console
 Every 2.0s: kubectl -n rook-ceph get deployment -o j...

-rook-ceph-mgr-a         req/upd/avl: 1/1/1      rook-version=v1.14.0
-rook-ceph-mon-a         req/upd/avl: 1/1/1      rook-version=v1.14.0
-rook-ceph-mon-b         req/upd/avl: 1/1/1      rook-version=v1.14.0
-rook-ceph-mon-c         req/upd/avl: 1/1/1      rook-version=v1.14.0
-rook-ceph-osd-0         req/upd/avl: 1//        rook-version=v1.14.0
-rook-ceph-osd-1         req/upd/avl: 1/1/1      rook-version=v1.13.7
-rook-ceph-osd-2         req/upd/avl: 1/1/1      rook-version=v1.13.7
+rook-ceph-mgr-a         req/upd/avl: 1/1/1      rook-version=v1.14.1
+rook-ceph-mon-a         req/upd/avl: 1/1/1      rook-version=v1.14.1
+rook-ceph-mon-b         req/upd/avl: 1/1/1      rook-version=v1.14.1
+rook-ceph-mon-c         req/upd/avl: 1/1/1      rook-version=v1.14.1
+rook-ceph-osd-0         req/upd/avl: 1//        rook-version=v1.14.1
+rook-ceph-osd-1         req/upd/avl: 1/1/1      rook-version=v1.13.8
+rook-ceph-osd-2         req/upd/avl: 1/1/1      rook-version=v1.13.8
 ```

 An easy check to see if the upgrade is totally finished is to check that there is only one
@@ -250,15 +250,15 @@ An easy check to see if the upgrade is totally finished is to check that there i
 ```console
 # kubectl -n $ROOK_CLUSTER_NAMESPACE get deployment -l rook_cluster=$ROOK_CLUSTER_NAMESPACE -o jsonpath='{range .items[*]}{"rook-version="}{.metadata.labels.rook-version}{"\n"}{end}' | sort | uniq
 This cluster is not yet finished:
-  rook-version=v1.13.7
-  rook-version=v1.14.0
+  rook-version=v1.13.8
+  rook-version=v1.14.1
 This cluster is finished:
-  rook-version=v1.14.0
+  rook-version=v1.14.1
 ```

 ### **5. Verify the updated cluster**

-At this point, the Rook operator should be running version `rook/ceph:v1.14.0`.
+At this point, the Rook operator should be running version `rook/ceph:v1.14.1`.

 Verify the CephCluster health using the [health verification doc](health-verification.md).

diff --git a/deploy/charts/rook-ceph/values.yaml b/deploy/charts/rook-ceph/values.yaml
index 2dacb989f6da..67992650a6c7 100644
--- a/deploy/charts/rook-ceph/values.yaml
+++ b/deploy/charts/rook-ceph/values.yaml
@@ -7,7 +7,7 @@ image:
   repository: rook/ceph
   # -- Image tag
   # @default -- `master`
-  tag: v1.14.0
+  tag: v1.14.1
   # -- Image pull policy
   pullPolicy: IfNotPresent

diff --git a/deploy/examples/direct-mount.yaml b/deploy/examples/direct-mount.yaml
index 15c8c3e5ac1e..904e21b702a2 100644
--- a/deploy/examples/direct-mount.yaml
+++ b/deploy/examples/direct-mount.yaml
@@ -19,7 +19,7 @@ spec:
       serviceAccountName: rook-ceph-default
       containers:
         - name: rook-direct-mount
-          image: rook/ceph:v1.14.0
+          image: rook/ceph:v1.14.1
           command: ["/bin/bash"]
           args: ["-m", "-c", "/usr/local/bin/toolbox.sh"]
           imagePullPolicy: IfNotPresent
diff --git a/deploy/examples/images.txt b/deploy/examples/images.txt
index d6e849b54d47..95d90d1cda47 100644
--- a/deploy/examples/images.txt
+++ b/deploy/examples/images.txt
@@ -8,4 +8,4 @@
  registry.k8s.io/sig-storage/csi-provisioner:v4.0.0
  registry.k8s.io/sig-storage/csi-resizer:v1.10.0
  registry.k8s.io/sig-storage/csi-snapshotter:v7.0.1
- rook/ceph:v1.14.0
+ rook/ceph:v1.14.1
diff --git a/deploy/examples/multus-validation.yaml b/deploy/examples/multus-validation.yaml
index 9fef90617c2b..984c711a508e 100644
--- a/deploy/examples/multus-validation.yaml
+++ b/deploy/examples/multus-validation.yaml
@@ -101,7 +101,7 @@ spec:
       serviceAccountName: rook-ceph-multus-validation
       containers:
         - name: multus-validation
-          image: rook/ceph:v1.14.0
+          image: rook/ceph:v1.14.1
           command: ["rook"]
           args:
             - "multus"
diff --git a/deploy/examples/operator-openshift.yaml b/deploy/examples/operator-openshift.yaml
index ccc9d7825257..0fb2d2e7e78b 100644
--- a/deploy/examples/operator-openshift.yaml
+++ b/deploy/examples/operator-openshift.yaml
@@ -667,7 +667,7 @@ spec:
       serviceAccountName: rook-ceph-system
      containers:
         - name: rook-ceph-operator
-          image: rook/ceph:v1.14.0
+          image: rook/ceph:v1.14.1
           args: ["ceph", "operator"]
           securityContext:
             runAsNonRoot: true
diff --git a/deploy/examples/operator.yaml b/deploy/examples/operator.yaml
index 5a5b1d4a644a..104ad7ee49b9 100644
--- a/deploy/examples/operator.yaml
+++ b/deploy/examples/operator.yaml
@@ -591,7 +591,7 @@ spec:
       serviceAccountName: rook-ceph-system
       containers:
         - name: rook-ceph-operator
-          image: rook/ceph:v1.14.0
+          image: rook/ceph:v1.14.1
           args: ["ceph", "operator"]
           securityContext:
             runAsNonRoot: true
diff --git a/deploy/examples/osd-purge.yaml b/deploy/examples/osd-purge.yaml
index 0eca0d5b35a3..92aad497c754 100644
--- a/deploy/examples/osd-purge.yaml
+++ b/deploy/examples/osd-purge.yaml
@@ -28,7 +28,7 @@ spec:
       serviceAccountName: rook-ceph-purge-osd
       containers:
         - name: osd-removal
-          image: rook/ceph:v1.14.0
+          image: rook/ceph:v1.14.1
           # TODO: Insert the OSD ID in the last parameter that is to be removed
           # The OSD IDs are a comma-separated list. For example: "0" or "0,2".
           # If you want to preserve the OSD PVCs, set `--preserve-pvc true`.
diff --git a/deploy/examples/toolbox-job.yaml b/deploy/examples/toolbox-job.yaml
index a402661034d9..46fb6ec63fda 100644
--- a/deploy/examples/toolbox-job.yaml
+++ b/deploy/examples/toolbox-job.yaml
@@ -10,7 +10,7 @@ spec:
     spec:
       initContainers:
         - name: config-init
-          image: rook/ceph:v1.14.0
+          image: rook/ceph:v1.14.1
           command: ["/usr/local/bin/toolbox.sh"]
           args: ["--skip-watch"]
           imagePullPolicy: IfNotPresent
@@ -29,7 +29,7 @@ spec:
               mountPath: /var/lib/rook-ceph-mon
       containers:
         - name: script
-          image: rook/ceph:v1.14.0
+          image: rook/ceph:v1.14.1
           volumeMounts:
             - mountPath: /etc/ceph
               name: ceph-config
diff --git a/deploy/examples/toolbox-operator-image.yaml b/deploy/examples/toolbox-operator-image.yaml
index 64c62fa5a53d..45f9d6c97a12 100644
--- a/deploy/examples/toolbox-operator-image.yaml
+++ b/deploy/examples/toolbox-operator-image.yaml
@@ -25,7 +25,7 @@ spec:
       serviceAccountName: rook-ceph-default
       containers:
         - name: rook-ceph-tools-operator-image
-          image: rook/ceph:v1.14.0
+          image: rook/ceph:v1.14.1
           command:
             - /bin/bash
             - -c
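Every hunk above applies the same change: the Rook image tag moves from `v1.14.0` to `v1.14.1`, and the upgrade guide's example starting release moves from `v1.13.7` to `v1.13.8`. As a quick sanity check that no documentation page or example manifest still pins the old tag, a repository-wide search such as the sketch below can be run from the Rook repo root after the bump; the exact command is an assumption for illustration, not part of the official release tooling.

```console
# Hypothetical post-bump check: list any file that still references the old image tag
# (covers both the `rook/ceph:v1.14.0` form and the Helm chart's `tag: v1.14.0` form).
# grep exits non-zero when nothing matches, so the echo confirms a clean bump.
grep -rnE 'rook/ceph:v1\.14\.0|tag: v1\.14\.0' Documentation deploy || echo "no stale v1.14.0 references found"
```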