Merge pull request #8811 from travisn/release-1.6.10
build: Update release version to v1.6.10
travisn committed Sep 23, 2021
2 parents f36318b + d892091 commit f793f94
Showing 24 changed files with 55 additions and 55 deletions.
18 changes: 9 additions & 9 deletions .github/workflows/canary-integration-test.yml
@@ -52,7 +52,7 @@ jobs:
# set VERSION to a dummy value since Jenkins normally sets it for us. Do this to make Helm happy and not fail with "Error: Invalid Semantic Version"
GOPATH=$(go env GOPATH) make clean && make -j$nproc IMAGES='ceph' VERSION=0 build
docker images
-docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.9
+docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.10
- name: validate-yaml
run: tests/scripts/github-action-helper.sh validate_yaml
@@ -138,7 +138,7 @@ jobs:
# set VERSION to a dummy value since Jenkins normally sets it for us. Do this to make Helm happy and not fail with "Error: Invalid Semantic Version"
GOPATH=$(go env GOPATH) make clean && make -j$nproc IMAGES='ceph' VERSION=0 build
docker images
-docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.9
+docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.10
- name: create cluster prerequisites
run: |
@@ -224,7 +224,7 @@ jobs:
# set VERSION to a dummy value since Jenkins normally sets it for us. Do this to make Helm happy and not fail with "Error: Invalid Semantic Version"
GOPATH=$(go env GOPATH) make clean && make -j$nproc IMAGES='ceph' VERSION=0 build
docker images
-docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.9
+docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.10
- name: create cluster prerequisites
run: tests/scripts/github-action-helper.sh create_cluster_prerequisites
@@ -296,7 +296,7 @@ jobs:
# set VERSION to a dummy value since Jenkins normally sets it for us. Do this to make Helm happy and not fail with "Error: Invalid Semantic Version"
GOPATH=$(go env GOPATH) make clean && make -j$nproc IMAGES='ceph' VERSION=0 build
docker images
-docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.9
+docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.10
- name: create cluster prerequisites
run: tests/scripts/github-action-helper.sh create_cluster_prerequisites
@@ -368,7 +368,7 @@ jobs:
# set VERSION to a dummy value since Jenkins normally sets it for us. Do this to make Helm happy and not fail with "Error: Invalid Semantic Version"
GOPATH=$(go env GOPATH) make clean && make -j$nproc IMAGES='ceph' VERSION=0 build
docker images
-docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.9
+docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.10
- name: create cluster prerequisites
run: |
@@ -445,7 +445,7 @@ jobs:
# set VERSION to a dummy value since Jenkins normally sets it for us. Do this to make Helm happy and not fail with "Error: Invalid Semantic Version"
GOPATH=$(go env GOPATH) make clean && make -j$nproc IMAGES='ceph' VERSION=0 build
docker images
-docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.9
+docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.10
- name: create cluster prerequisites
run: tests/scripts/github-action-helper.sh create_cluster_prerequisites
@@ -519,7 +519,7 @@ jobs:
# set VERSION to a dummy value since Jenkins normally sets it for us. Do this to make Helm happy and not fail with "Error: Invalid Semantic Version"
GOPATH=$(go env GOPATH) make clean && make -j$nproc IMAGES='ceph' VERSION=0 build
docker images
-docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.9
+docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.10
- name: create cluster prerequisites
run: tests/scripts/github-action-helper.sh create_cluster_prerequisites
@@ -597,7 +597,7 @@ jobs:
# set VERSION to a dummy value since Jenkins normally sets it for us. Do this to make Helm happy and not fail with "Error: Invalid Semantic Version"
GOPATH=$(go env GOPATH) make clean && make -j$nproc IMAGES='ceph' VERSION=0 build
docker images
-docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.9
+docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.10
- name: rook prereq
run: |
tests/scripts/localPathPV.sh $(lsblk --paths|awk '/14G/ {print $1}'| head -1)
@@ -687,7 +687,7 @@ jobs:
# set VERSION to a dummy value since Jenkins normally sets it for us. Do this to make Helm happy and not fail with "Error: Invalid Semantic Version"
GOPATH=$(go env GOPATH) make clean && make -j$nproc IMAGES='ceph' VERSION=0 build
docker images
-docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.9
+docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.10
- name: create LV on disk
run: tests/scripts/github-action-helper.sh create_LV_on_disk
2 changes: 1 addition & 1 deletion .github/workflows/integration-test-flex-suite.yaml
@@ -44,7 +44,7 @@ jobs:
# set VERSION to a dummy value since Jenkins normally sets it for us. Do this to make Helm happy and not fail with "Error: Invalid Semantic Version"
GOPATH=$(go env GOPATH) make clean && make -j$nproc IMAGES='ceph' VERSION=0 build
docker images
-docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.9
+docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.10
- name: TestCephFlexSuite
run: |
2 changes: 1 addition & 1 deletion .github/workflows/integration-test-helm-suite.yaml
@@ -51,7 +51,7 @@ jobs:
# set VERSION to a dummy value since Jenkins normally sets it for us. Do this to make Helm happy and not fail with "Error: Invalid Semantic Version"
GOPATH=$(go env GOPATH) make clean && make -j$nproc IMAGES='ceph' VERSION=0 build
docker images
-docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.9
+docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.10
- name: TestCephHelmSuite
run: |
2 changes: 1 addition & 1 deletion .github/workflows/integration-test-mgr-suite.yaml
@@ -45,7 +45,7 @@ jobs:
# set VERSION to a dummy value since Jenkins normally sets it for us. Do this to make Helm happy and not fail with "Error: Invalid Semantic Version"
GOPATH=$(go env GOPATH) make clean && make -j$nproc IMAGES='ceph' VERSION=0 build
docker images
-docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.9
+docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.10
- name: TestCephMgrSuite
run: |
@@ -48,7 +48,7 @@ jobs:
# set VERSION to a dummy value since Jenkins normally sets it for us. Do this to make Helm happy and not fail with "Error: Invalid Semantic Version"
GOPATH=$(go env GOPATH) make clean && make -j$nproc IMAGES='ceph' VERSION=0 build
docker images
-docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.9
+docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.10
- name: TestCephMultiClusterDeploySuite
run: |
2 changes: 1 addition & 1 deletion .github/workflows/integration-test-smoke-suite.yaml
@@ -48,7 +48,7 @@ jobs:
# set VERSION to a dummy value since Jenkins normally sets it for us. Do this to make Helm happy and not fail with "Error: Invalid Semantic Version"
GOPATH=$(go env GOPATH) make clean && make -j$nproc IMAGES='ceph' VERSION=0 build
docker images
-docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.9
+docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.10
- name: TestCephSmokeSuite
run: |
2 changes: 1 addition & 1 deletion .github/workflows/integration-test-upgrade-suite.yaml
@@ -48,7 +48,7 @@ jobs:
# set VERSION to a dummy value since Jenkins normally sets it for us. Do this to make Helm happy and not fail with "Error: Invalid Semantic Version"
GOPATH=$(go env GOPATH) make clean && make -j$nproc IMAGES='ceph' VERSION=0 build
docker images
-docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.9
+docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.10
- name: TestCephUpgradeSuite
run: |
10 changes: 5 additions & 5 deletions .github/workflows/integration-tests-on-release.yaml
@@ -49,7 +49,7 @@ jobs:
# set VERSION to a dummy value since Jenkins normally sets it for us. Do this to make Helm happy and not fail with "Error: Invalid Semantic Version"
GOPATH=$(go env GOPATH) make clean && make -j$nproc IMAGES='ceph' VERSION=0 build
docker images
-docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.9
+docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.10
- name: TestCephFlexSuite
run: |
@@ -107,7 +107,7 @@ jobs:
# set VERSION to a dummy value since Jenkins normally sets it for us. Do this to make Helm happy and not fail with "Error: Invalid Semantic Version"
GOPATH=$(go env GOPATH) make clean && make -j$nproc IMAGES='ceph' VERSION=0 build
docker images
-docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.9
+docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.10
- name: TestCephHelmSuite
run: |
@@ -164,7 +164,7 @@ jobs:
# set VERSION to a dummy value since Jenkins normally sets it for us. Do this to make Helm happy and not fail with "Error: Invalid Semantic Version"
GOPATH=$(go env GOPATH) make clean && make -j$nproc IMAGES='ceph' VERSION=0 build
docker images
-docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.9
+docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.10
- name: TestCephMultiClusterDeploySuite
run: |
@@ -220,7 +220,7 @@ jobs:
# set VERSION to a dummy value since Jenkins normally sets it for us. Do this to make Helm happy and not fail with "Error: Invalid Semantic Version"
GOPATH=$(go env GOPATH) make clean && make -j$nproc IMAGES='ceph' VERSION=0 build
docker images
-docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.9
+docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.10
- name: TestCephSmokeSuite
run: |
@@ -275,7 +275,7 @@ jobs:
# set VERSION to a dummy value since Jenkins normally sets it for us. Do this to make Helm happy and not fail with "Error: Invalid Semantic Version"
GOPATH=$(go env GOPATH) make clean && make -j$nproc IMAGES='ceph' VERSION=0 build
docker images
-docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.9
+docker tag $(docker images|awk '/build-/ {print $1}') rook/ceph:v1.6.10
- name: TestCephUpgradeSuite
run: |
2 changes: 1 addition & 1 deletion Documentation/cassandra.md
@@ -21,7 +21,7 @@ To make sure you have a Kubernetes cluster that is ready for `Rook`, you can [fo
First deploy the Rook Cassandra Operator using the following commands:

```console
-$ git clone --single-branch --branch v1.6.9 https://github.com/rook/rook.git
+$ git clone --single-branch --branch v1.6.10 https://github.com/rook/rook.git
cd rook/cluster/examples/kubernetes/cassandra
kubectl apply -f operator.yaml
```
6 changes: 3 additions & 3 deletions Documentation/ceph-common-issues.md
@@ -995,15 +995,15 @@ You can see https://github.com/rook/rook/issues/7940 for more detailed informati

### Solution
#### Recover from corruption (v1.6.0-v1.6.7)
-If you are using Rook v1.6, you must first update to v1.6.9 or higher to avoid further incidents of
+If you are using Rook v1.6, you must first update to v1.6.10 or higher to avoid further incidents of
OSD corruption caused by these Atari partitions.

An old workaround suggested using `deviceFilter: ^sd[a-z]+$`, but this still results in unexpected
partitions. Rook will merely stop creating new OSDs on the partitions. It does not fix the related
issue that `ceph-volume` is unaware of the Atari partition problem. Users who used this
workaround are still at risk for OSD failures in the future.
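
For orientation, `deviceFilter` lives under `spec.storage` in the CephCluster resource. A minimal sketch with illustrative names, showing the old workaround pattern (not a recommendation):

```yaml
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: rook-ceph       # illustrative name
  namespace: rook-ceph  # illustrative namespace
spec:
  storage:
    useAllNodes: true
    useAllDevices: false
    # Old workaround: only consider whole sd* device names for OSDs. This
    # merely stops new OSDs from landing on Atari partitions; ceph-volume
    # remains unaware of them, so updating to v1.6.10+ is still required.
    deviceFilter: ^sd[a-z]+$
```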

-To resolve the issue, immediately update to v1.6.9 or higher. After the update, no corruption should
+To resolve the issue, immediately update to v1.6.10 or higher. After the update, no corruption should
occur on OSDs created in the future. Next, to get back to a healthy Ceph cluster state,
[remove all OSDs on each corrupted disk](ceph-osd-mgmt.md#remove-an-osd), focusing on one
corrupted disk at a time.
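
A rough sketch of the per-OSD removal flow described there, assuming the `rook-ceph` namespace and OSD id `0` (run the `ceph` commands from the toolbox pod):

```console
# Mark the OSD out so Ceph migrates its data off the disk
ceph osd out 0
# After data migration completes, stop the OSD daemon
kubectl -n rook-ceph scale deployment rook-ceph-osd-0 --replicas=0
# Remove the OSD from the cluster
ceph osd purge 0 --yes-i-really-mean-it
```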
@@ -1024,4 +1024,4 @@ as well as a second corrupted disk `/dev/sde` with one unexpected partition (`/d
5. Now repeat steps 1-4 for `/dev/sde` and `/dev/sde2`, and continue for any other corrupted disks.

If your Rook-Ceph cluster does not have any critical data stored in it, it may be simpler to
-uninstall Rook completely and redeploy with v1.6.9 or higher.
+uninstall Rook completely and redeploy with v1.6.10 or higher.
2 changes: 1 addition & 1 deletion Documentation/ceph-monitoring.md
@@ -38,7 +38,7 @@ With the Prometheus operator running, we can create a service monitor that will
From the root of your locally cloned Rook repo, go to the monitoring directory:

```console
-$ git clone --single-branch --branch v1.6.9 https://github.com/rook/rook.git
+$ git clone --single-branch --branch v1.6.10 https://github.com/rook/rook.git
cd rook/cluster/examples/kubernetes/ceph/monitoring
```

2 changes: 1 addition & 1 deletion Documentation/ceph-quickstart.md
@@ -50,7 +50,7 @@ If the `FSTYPE` field is not empty, there is a filesystem on top of the correspo
If you're feeling lucky, a simple Rook cluster can be created with the following kubectl commands and [example yaml files](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph). For the more detailed install, skip to the next section to [deploy the Rook operator](#deploy-the-rook-operator).

```console
-$ git clone --single-branch --branch v1.6.9 https://github.com/rook/rook.git
+$ git clone --single-branch --branch v1.6.10 https://github.com/rook/rook.git
cd rook/cluster/examples/kubernetes/ceph
kubectl create -f crds.yaml -f common.yaml -f operator.yaml
kubectl create -f cluster.yaml
6 changes: 3 additions & 3 deletions Documentation/ceph-toolbox.md
@@ -43,7 +43,7 @@ spec:
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: rook-ceph-tools
-image: rook/ceph:v1.6.9
+image: rook/ceph:v1.6.10
command: ["/tini"]
args: ["-g", "--", "/usr/local/bin/toolbox.sh"]
imagePullPolicy: IfNotPresent
@@ -133,7 +133,7 @@ spec:
spec:
initContainers:
- name: config-init
-image: rook/ceph:v1.6.9
+image: rook/ceph:v1.6.10
command: ["/usr/local/bin/toolbox.sh"]
args: ["--skip-watch"]
imagePullPolicy: IfNotPresent
@@ -155,7 +155,7 @@ spec:
mountPath: /etc/rook
containers:
- name: script
-image: rook/ceph:v1.6.9
+image: rook/ceph:v1.6.10
volumeMounts:
- mountPath: /etc/ceph
name: ceph-config
30 changes: 15 additions & 15 deletions Documentation/ceph-upgrade.md
@@ -52,12 +52,12 @@ With this upgrade guide, there are a few notes to consider:

Unless otherwise noted due to extenuating requirements, upgrades from one patch release of Rook to
another are as simple as updating the common resources and the image of the Rook operator. For
-example, when Rook v1.6.9 is released, the process of updating from v1.6.0 is as simple as running
+example, when Rook v1.6.10 is released, the process of updating from v1.6.0 is as simple as running
the following:

First get the latest common resources manifests that contain the latest changes for Rook v1.6.
```sh
-git clone --single-branch --depth=1 --branch v1.6.9 https://github.com/rook/rook.git
+git clone --single-branch --depth=1 --branch v1.6.10 https://github.com/rook/rook.git
cd rook/cluster/examples/kubernetes/ceph
```

@@ -68,7 +68,7 @@ section for instructions on how to change the default namespaces in `common.yaml
Then apply the latest changes from v1.6 and update the Rook Operator image.
```console
kubectl apply -f common.yaml -f crds.yaml
-kubectl -n rook-ceph set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.6.9
+kubectl -n rook-ceph set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.6.10
```

As exemplified above, it is a good practice to update Rook-Ceph common resources from the example
@@ -248,7 +248,7 @@ Any pod that is using a Rook volume should also remain healthy:
## Rook Operator Upgrade Process

In the examples given in this guide, we will be upgrading a live Rook cluster running `v1.5.9` to
the version `v1.6.9`. This upgrade should work from any official patch release of Rook v1.5 to any
-the version `v1.6.9`. This upgrade should work from any official patch release of Rook v1.5 to any
+the version `v1.6.10`. This upgrade should work from any official patch release of Rook v1.5 to any
official patch release of v1.6.

**Rook releases from `master` are expressly unsupported.** It is strongly recommended that you use
@@ -282,7 +282,7 @@ needed by the Operator. Also update the Custom Resource Definitions (CRDs).
First get the latest common resources manifests that contain the latest changes for Rook v1.6.
```sh
-git clone --single-branch --depth=1 --branch v1.6.9 https://github.com/rook/rook.git
+git clone --single-branch --depth=1 --branch v1.6.10 https://github.com/rook/rook.git
cd rook/cluster/examples/kubernetes/ceph
```

@@ -338,7 +338,7 @@ The largest portion of the upgrade is triggered when the operator's image is upd
When the operator is updated, it will proceed to update all of the Ceph daemons.

```sh
-kubectl -n $ROOK_OPERATOR_NAMESPACE set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.6.9
+kubectl -n $ROOK_OPERATOR_NAMESPACE set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.6.10
```

## 4. Wait for the upgrade to complete
@@ -354,17 +354,17 @@ watch --exec kubectl -n $ROOK_CLUSTER_NAMESPACE get deployments -l rook_cluster=
```

As an example, this cluster is midway through updating the OSDs from v1.5 to v1.6. When all
-deployments report `1/1/1` availability and `rook-version=v1.6.9`, the Ceph cluster's core
+deployments report `1/1/1` availability and `rook-version=v1.6.10`, the Ceph cluster's core
components are fully updated.

>```
>Every 2.0s: kubectl -n rook-ceph get deployment -o j...
>
->rook-ceph-mgr-a req/upd/avl: 1/1/1 rook-version=v1.6.9
->rook-ceph-mon-a req/upd/avl: 1/1/1 rook-version=v1.6.9
->rook-ceph-mon-b req/upd/avl: 1/1/1 rook-version=v1.6.9
->rook-ceph-mon-c req/upd/avl: 1/1/1 rook-version=v1.6.9
->rook-ceph-osd-0 req/upd/avl: 1// rook-version=v1.6.9
+>rook-ceph-mgr-a req/upd/avl: 1/1/1 rook-version=v1.6.10
+>rook-ceph-mon-a req/upd/avl: 1/1/1 rook-version=v1.6.10
+>rook-ceph-mon-b req/upd/avl: 1/1/1 rook-version=v1.6.10
+>rook-ceph-mon-c req/upd/avl: 1/1/1 rook-version=v1.6.10
+>rook-ceph-osd-0 req/upd/avl: 1// rook-version=v1.6.10
>rook-ceph-osd-1 req/upd/avl: 1/1/1 rook-version=v1.5.9
>rook-ceph-osd-2 req/upd/avl: 1/1/1 rook-version=v1.5.9
>```
@@ -376,14 +376,14 @@ An easy check to see if the upgrade is totally finished is to check that there i
# kubectl -n $ROOK_CLUSTER_NAMESPACE get deployment -l rook_cluster=$ROOK_CLUSTER_NAMESPACE -o jsonpath='{range .items[*]}{"rook-version="}{.metadata.labels.rook-version}{"\n"}{end}' | sort | uniq
This cluster is not yet finished:
rook-version=v1.5.9
-rook-version=v1.6.9
+rook-version=v1.6.10
This cluster is finished:
-rook-version=v1.6.9
+rook-version=v1.6.10
```

## 5. Verify the updated cluster

-At this point, your Rook operator should be running version `rook/ceph:v1.6.9`.
+At this point, your Rook operator should be running version `rook/ceph:v1.6.10`.
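
A quick way to confirm which image the operator is actually running (a sketch; substitute your operator namespace if it differs):

```console
kubectl -n $ROOK_OPERATOR_NAMESPACE get deploy rook-ceph-operator -o jsonpath='{.spec.template.spec.containers[0].image}'
```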

Verify the Ceph cluster's health using the [health verification section](#health-verification).

2 changes: 1 addition & 1 deletion Documentation/nfs.md
@@ -23,7 +23,7 @@ You can read further about the details and limitations of these volumes in the [
First deploy the Rook NFS operator using the following commands:

```console
-$ git clone --single-branch --branch v1.6.9 https://github.com/rook/rook.git
+$ git clone --single-branch --branch v1.6.10 https://github.com/rook/rook.git
cd rook/cluster/examples/kubernetes/nfs
kubectl create -f common.yaml
kubectl create -f operator.yaml
2 changes: 1 addition & 1 deletion cluster/examples/kubernetes/cassandra/operator.yaml
@@ -185,7 +185,7 @@ spec:
serviceAccountName: rook-cassandra-operator
containers:
- name: rook-cassandra-operator
-image: rook/cassandra:v1.6.9
+image: rook/cassandra:v1.6.10
imagePullPolicy: "Always"
args: ["cassandra", "operator"]
env: