From 32acb6ddb1e9f980f34a3f9e341b05a2d14c064d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Mon, 20 Sep 2021 11:27:40 +0200 Subject: [PATCH] ci: add daily jobs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds the following new daily jobs: * one that runs both the smoke and object suites against the next Pacific version (pacific-devel) * one that runs both the smoke and object suites against Ceph master * one that tests the upgrade from the current Pacific stable release to the Pacific devel build * one that tests the upgrade from the current Octopus stable release to the Octopus devel build Signed-off-by: Sébastien Han --- .../canary-integration-test-arm64.yml | 82 ----- .github/workflows/daily-nightly-jobs.yml | 338 ++++++++++++++++++ tests/framework/installer/ceph_installer.go | 18 +- tests/integration/ceph_object_test.go | 2 +- tests/integration/ceph_smoke_test.go | 2 +- tests/integration/ceph_upgrade_test.go | 161 ++++++--- 6 files changed, 463 insertions(+), 140 deletions(-) delete mode 100644 .github/workflows/canary-integration-test-arm64.yml create mode 100644 .github/workflows/daily-nightly-jobs.yml diff --git a/.github/workflows/canary-integration-test-arm64.yml b/.github/workflows/canary-integration-test-arm64.yml deleted file mode 100644 index d9c79c6ccdd38..0000000000000 --- a/.github/workflows/canary-integration-test-arm64.yml +++ /dev/null @@ -1,82 +0,0 @@ -name: Canary integration tests ARM64 -on: - schedule: - - cron: '0 0 * * *' # every day at midnight - -defaults: - run: - # reference: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#using-a-specific-shell - shell: bash --noprofile --norc -eo pipefail -x {0} - -jobs: - canary-arm64: - runs-on: [self-hosted, ubuntu-20.04, ARM64] - if: github.repository == 'rook/rook' - env: - BLOCK: /dev/sdb - - steps: - - name: checkout - uses: actions/checkout@v2 - with: - fetch-depth: 0 - - - name: setup golang - uses: actions/setup-go@v2 - with: - go-version: 1.16 - - - name: teardown minikube and docker - run: | - uptime - minikube delete - docker system prune -a - - - name: setup minikube - run: | - # sudo apt-get install build-essential -y - # curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-linux-arm64 - # sudo install minikube-linux-arm64 /usr/local/bin/minikube - # sudo rm -f minikube-linux-arm64 - # curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl" - # sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl - minikube start --memory 28g --cpus=12 --driver=docker - - - name: print k8s cluster status - run: tests/scripts/github-action-helper.sh print_k8s_cluster_status - - - name: use local disk and create partitions for osds - run: | - tests/scripts/github-action-helper.sh use_local_disk - tests/scripts/create-bluestore-partitions.sh --disk "$BLOCK" --osd-count 1 - - - name: validate-yaml - run: tests/scripts/github-action-helper.sh validate_yaml - - - name: deploy cluster - run: | - # removing liveness probes since the env is slow and the probe is killing the daemons - yq write -d1 -i cluster/examples/kubernetes/ceph/cluster-test.yaml "spec.healthCheck.livenessProbe.mon.disabled" true - yq write -d1 -i cluster/examples/kubernetes/ceph/cluster-test.yaml "spec.healthCheck.livenessProbe.mgr.disabled" true - yq write -d1 -i cluster/examples/kubernetes/ceph/cluster-test.yaml "spec.healthCheck.livenessProbe.osd.disabled" true - tests/scripts/github-action-helper.sh deploy_cluster - # there are no package for arm64
nfs-ganesha - kubectl delete -f cluster/examples/kubernetes/ceph/nfs-test.yaml - - - name: wait for prepare pod - run: timeout 900 sh -c 'until kubectl -n rook-ceph logs -f $(kubectl -n rook-ceph get pod -l app=rook-ceph-osd-prepare -o jsonpath='{.items[*].metadata.name}'); do sleep 5; done' || kubectl -n rook-ceph get all && kubectl logs -n rook-ceph deploy/rook-ceph-operator - - - name: wait for ceph to be ready - run: tests/scripts/github-action-helper.sh wait_for_ceph_to_be_ready all 1 - - - name: teardown minikube and docker - run: | - minikube delete - docker system prune -a - - - name: upload canary test result - uses: actions/upload-artifact@v2 - if: always() - with: - name: canary-arm64 - path: test diff --git a/.github/workflows/daily-nightly-jobs.yml b/.github/workflows/daily-nightly-jobs.yml new file mode 100644 index 0000000000000..6d7f9a8785c57 --- /dev/null +++ b/.github/workflows/daily-nightly-jobs.yml @@ -0,0 +1,338 @@ +name: Daily nightly jobs +on: + # TODO: remove me when code has been validated against a PR and uncomment the "schedule" below + pull_request: + branches: + - master + # schedule: + # - cron: "0 0 * * *" # every day at midnight + +defaults: + run: + # reference: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#using-a-specific-shell + shell: bash --noprofile --norc -eo pipefail -x {0} + +jobs: + canary-arm64: + runs-on: [self-hosted, ubuntu-20.04, ARM64] + if: github.repository == 'rook/rook' + env: + BLOCK: /dev/sdb + + steps: + - name: checkout + uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: setup golang + uses: actions/setup-go@v2 + with: + go-version: 1.16 + + - name: teardown minikube and docker + run: | + uptime + minikube delete + docker system prune -a + + - name: setup minikube + run: | + # sudo apt-get install build-essential -y + # curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-linux-arm64 + # sudo install minikube-linux-arm64 /usr/local/bin/minikube + # sudo rm -f minikube-linux-arm64 + # curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl" + # sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl + minikube start --memory 28g --cpus=12 --driver=docker + + - name: print k8s cluster status + run: tests/scripts/github-action-helper.sh print_k8s_cluster_status + + - name: use local disk and create partitions for osds + run: | + tests/scripts/github-action-helper.sh use_local_disk + tests/scripts/create-bluestore-partitions.sh --disk "$BLOCK" --osd-count 1 + + - name: validate-yaml + run: tests/scripts/github-action-helper.sh validate_yaml + + - name: deploy cluster + run: | + # removing liveness probes since the env is slow and the probe is killing the daemons + yq write -d1 -i cluster/examples/kubernetes/ceph/cluster-test.yaml "spec.healthCheck.livenessProbe.mon.disabled" true + yq write -d1 -i cluster/examples/kubernetes/ceph/cluster-test.yaml "spec.healthCheck.livenessProbe.mgr.disabled" true + yq write -d1 -i cluster/examples/kubernetes/ceph/cluster-test.yaml "spec.healthCheck.livenessProbe.osd.disabled" true + tests/scripts/github-action-helper.sh deploy_cluster + # there are no package for arm64 nfs-ganesha + kubectl delete -f cluster/examples/kubernetes/ceph/nfs-test.yaml + + - name: wait for prepare pod + run: timeout 900 sh -c 'until kubectl -n rook-ceph logs -f $(kubectl -n rook-ceph get pod -l app=rook-ceph-osd-prepare -o jsonpath='{.items[*].metadata.name}'); do sleep 5; done' || kubectl -n 
rook-ceph get all && kubectl logs -n rook-ceph deploy/rook-ceph-operator + + - name: wait for ceph to be ready + run: tests/scripts/github-action-helper.sh wait_for_ceph_to_be_ready all 1 + + - name: teardown minikube and docker + run: | + minikube delete + docker system prune -a + + - name: upload canary test result + uses: actions/upload-artifact@v2 + if: always() + with: + name: canary-arm64 + path: test + + smoke-suite-pacific-devel: + runs-on: ubuntu-18.04 + steps: + - name: checkout + uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: setup golang + uses: actions/setup-go@v2 + with: + go-version: 1.16 + + - name: setup minikube + uses: manusa/actions-setup-minikube@v2.4.2 + with: + minikube version: "v1.22.0" + kubernetes version: "v1.22.0" + start args: --memory 6g --cpus=2 + github token: ${{ secrets.GITHUB_TOKEN }} + + - name: print k8s cluster status + run: tests/scripts/github-action-helper.sh print_k8s_cluster_status + + - name: use local disk + run: tests/scripts/github-action-helper.sh use_local_disk_for_integration_test + + - name: build rook + run: tests/scripts/github-action-helper.sh build_rook + + - name: TestCephSmokeSuite + run: | + export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) + SKIP_CLEANUP_POLICY=false CEPH_SUITE_VERSION="pacific-devel" go test -v -timeout 1800s -run TestCephSmokeSuite github.com/rook/rook/tests/integration + + - name: Artifact + uses: actions/upload-artifact@v2 + if: failure() + with: + name: ceph-smoke-suite-artifact + path: /home/runner/work/rook/rook/tests/integration/_output/tests/ + + smoke-suite-ceph-master: + runs-on: ubuntu-18.04 + steps: + - name: checkout + uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: setup golang + uses: actions/setup-go@v2 + with: + go-version: 1.16 + + - name: setup minikube + uses: manusa/actions-setup-minikube@v2.4.2 + with: + minikube version: "v1.22.0" + kubernetes version: "v1.22.0" + start args: --memory 6g --cpus=2 + github token: ${{ secrets.GITHUB_TOKEN }} + + - name: print k8s cluster status + run: tests/scripts/github-action-helper.sh print_k8s_cluster_status + + - name: use local disk + run: tests/scripts/github-action-helper.sh use_local_disk_for_integration_test + + - name: build rook + run: tests/scripts/github-action-helper.sh build_rook + + - name: TestCephSmokeSuite + run: | + export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) + SKIP_CLEANUP_POLICY=false CEPH_SUITE_VERSION=master go test -v -timeout 1800s -run TestCephSmokeSuite github.com/rook/rook/tests/integration + + - name: Artifact + uses: actions/upload-artifact@v2 + if: failure() + with: + name: ceph-smoke-suite-artifact + path: /home/runner/work/rook/rook/tests/integration/_output/tests/ + + object-suite-pacific-devel: + runs-on: ubuntu-18.04 + steps: + - name: checkout + uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: setup golang + uses: actions/setup-go@v2 + with: + go-version: 1.16 + + - name: setup minikube + uses: manusa/actions-setup-minikube@v2.4.2 + with: + minikube version: "v1.22.0" + kubernetes version: "v1.22.0" + start args: --memory 6g --cpus=2 + github token: ${{ secrets.GITHUB_TOKEN }} + + - name: print k8s cluster status + run: tests/scripts/github-action-helper.sh print_k8s_cluster_status + + - name: use local disk + run: tests/scripts/github-action-helper.sh use_local_disk_for_integration_test + + - name: build rook + run: tests/scripts/github-action-helper.sh build_rook + + - name: TestCephObjectSuite + run: | + export DEVICE_FILTER=$(lsblk|awk '/14G/ 
{print $1}'| head -1) + SKIP_CLEANUP_POLICY=false CEPH_SUITE_VERSION="pacific-devel" go test -v -timeout 1800s -failfast -run TestCephObjectSuite github.com/rook/rook/tests/integration + + - name: Artifact + uses: actions/upload-artifact@v2 + if: failure() + with: + name: ceph-object-suite-artifact + path: /home/runner/work/rook/rook/tests/integration/_output/tests/ + + object-suite-master: + runs-on: ubuntu-18.04 + steps: + - name: checkout + uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: setup golang + uses: actions/setup-go@v2 + with: + go-version: 1.16 + + - name: setup minikube + uses: manusa/actions-setup-minikube@v2.4.2 + with: + minikube version: "v1.22.0" + kubernetes version: "v1.22.0" + start args: --memory 6g --cpus=2 + github token: ${{ secrets.GITHUB_TOKEN }} + + - name: print k8s cluster status + run: tests/scripts/github-action-helper.sh print_k8s_cluster_status + + - name: use local disk + run: tests/scripts/github-action-helper.sh use_local_disk_for_integration_test + + - name: build rook + run: tests/scripts/github-action-helper.sh build_rook + + - name: TestCephObjectSuite + run: | + export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) + SKIP_CLEANUP_POLICY=false CEPH_SUITE_VERSION=master go test -v -timeout 1800s -failfast -run TestCephObjectSuite github.com/rook/rook/tests/integration + + - name: Artifact + uses: actions/upload-artifact@v2 + if: failure() + with: + name: ceph-object-suite-artifact + path: /home/runner/work/rook/rook/tests/integration/_output/tests/ + + upgrade-from-pacific-stable-to-pacific-devel: + runs-on: ubuntu-18.04 + steps: + - name: checkout + uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: setup golang + uses: actions/setup-go@v2 + with: + go-version: 1.16 + + - name: setup minikube + uses: manusa/actions-setup-minikube@v2.4.2 + with: + minikube version: "v1.22.0" + kubernetes version: "v1.22.0" + start args: --memory 6g --cpus=2 + github token: ${{ secrets.GITHUB_TOKEN }} + + - name: print k8s cluster status + run: tests/scripts/github-action-helper.sh print_k8s_cluster_status + + - name: use local disk + run: tests/scripts/github-action-helper.sh use_local_disk_for_integration_test + + - name: build rook + run: tests/scripts/github-action-helper.sh build_rook + + - name: TestCephUpgradeSuite + run: | + export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) + go test -v -timeout 1800s -failfast -run TestCephUpgradeSuite/TestUpgradeToPacificDevel github.com/rook/rook/tests/integration + + - name: Artifact + uses: actions/upload-artifact@v2 + if: failure() + with: + name: ceph-upgrade-suite-artifact + path: /home/runner/work/rook/rook/tests/integration/_output/tests/ + + upgrade-from-octopus-stable-to-octopus-devel: + runs-on: ubuntu-18.04 + steps: + - name: checkout + uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: setup golang + uses: actions/setup-go@v2 + with: + go-version: 1.16 + + - name: setup minikube + uses: manusa/actions-setup-minikube@v2.4.2 + with: + minikube version: "v1.22.0" + kubernetes version: "v1.22.0" + start args: --memory 6g --cpus=2 + github token: ${{ secrets.GITHUB_TOKEN }} + + - name: print k8s cluster status + run: tests/scripts/github-action-helper.sh print_k8s_cluster_status + + - name: use local disk + run: tests/scripts/github-action-helper.sh use_local_disk_for_integration_test + + - name: build rook + run: tests/scripts/github-action-helper.sh build_rook + + - name: TestCephUpgradeSuite + run: | + export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| 
head -1) + go test -v -timeout 1800s -failfast -run TestCephUpgradeSuite/TestUpgradeToOctopusDevel github.com/rook/rook/tests/integration + + - name: Artifact + uses: actions/upload-artifact@v2 + if: failure() + with: + name: ceph-upgrade-suite-artifact + path: /home/runner/work/rook/rook/tests/integration/_output/tests/ diff --git a/tests/framework/installer/ceph_installer.go b/tests/framework/installer/ceph_installer.go index 3615b61acc8be..cc42cb0e4e4d2 100644 --- a/tests/framework/installer/ceph_installer.go +++ b/tests/framework/installer/ceph_installer.go @@ -51,8 +51,11 @@ const ( octopusTestImage = "quay.io/ceph/ceph:v15" // test with the latest pacific build pacificTestImage = "quay.io/ceph/ceph:v16.2.5" + // test with the current development version of Pacific + pacificDevelTestImage = "quay.io/ceph/daemon-base:latest-pacific-devel" + octopusDevelTestImage = "quay.io/ceph/daemon-base:latest-octopus-devel" // test with the latest master image - masterTestImage = "ceph/daemon-base:latest-master-devel" + masterTestImage = "quay.io/ceph/daemon-base:latest-master-devel" cephOperatorLabel = "app=rook-ceph-operator" defaultclusterName = "test-cluster" @@ -67,7 +70,9 @@ var ( NautilusVersion = cephv1.CephVersionSpec{Image: nautilusTestImage} NautilusPartitionVersion = cephv1.CephVersionSpec{Image: nautilusTestImagePartition} OctopusVersion = cephv1.CephVersionSpec{Image: octopusTestImage} + OctopusDevelVersion = cephv1.CephVersionSpec{Image: octopusDevelTestImage} PacificVersion = cephv1.CephVersionSpec{Image: pacificTestImage} + PacificDevelVersion = cephv1.CephVersionSpec{Image: pacificDevelTestImage} MasterVersion = cephv1.CephVersionSpec{Image: masterTestImage, AllowUnsupported: true} ) @@ -83,6 +88,17 @@ type CephInstaller struct { T func() *testing.T } +func ReturnCephVersion() cephv1.CephVersionSpec { + switch os.Getenv("CEPH_SUITE_VERSION") { + case "master": + return MasterVersion + case "pacific-devel": + return PacificDevelVersion + default: + return PacificVersion + } +} + // CreateCephOperator creates rook-operator via kubectl func (h *CephInstaller) CreateCephOperator() (err error) { // creating rook resources diff --git a/tests/integration/ceph_object_test.go b/tests/integration/ceph_object_test.go index f59eb9853db02..d59fadaf89935 100644 --- a/tests/integration/ceph_object_test.go +++ b/tests/integration/ceph_object_test.go @@ -76,7 +76,7 @@ func (s *ObjectSuite) SetupSuite() { EnableAdmissionController: true, UseCrashPruner: true, RookVersion: installer.VersionMaster, - CephVersion: installer.PacificVersion, + CephVersion: installer.ReturnCephVersion(), } s.settings.ApplyEnvVars() s.installer, s.k8sh = StartTestCluster(s.T, s.settings) diff --git a/tests/integration/ceph_smoke_test.go b/tests/integration/ceph_smoke_test.go index 6eb47fac8d97d..bcb7876e587ae 100644 --- a/tests/integration/ceph_smoke_test.go +++ b/tests/integration/ceph_smoke_test.go @@ -100,7 +100,7 @@ func (s *SmokeSuite) SetupSuite() { EnableAdmissionController: true, UseCrashPruner: true, RookVersion: installer.VersionMaster, - CephVersion: installer.PacificVersion, + CephVersion: installer.ReturnCephVersion(), } s.settings.ApplyEnvVars() s.installer, s.k8sh = StartTestCluster(s.T, s.settings) diff --git a/tests/integration/ceph_upgrade_test.go b/tests/integration/ceph_upgrade_test.go index beb3aec14532e..9d9b794b9cc63 100644 --- a/tests/integration/ceph_upgrade_test.go +++ b/tests/integration/ceph_upgrade_test.go @@ -97,6 +97,111 @@ func (s *UpgradeSuite) TearDownSuite() { } func (s *UpgradeSuite) 
TestUpgradeToMaster() { + message := "my simple message" + objectStoreName := "upgraded-object" + objectUserID := "upgraded-user" + preFilename := "pre-upgrade-file" + numOSDs, filesystemName, rbdFilesToRead, cephfsFilesToRead := s.deployClusterforUpgrade(objectStoreName, objectUserID, message, preFilename) + s.settings.CephVersion = installer.NautilusVersion + + // + // Upgrade Rook from v1.6 to master + // + logger.Infof("*** UPGRADING ROOK FROM %s to master ***", installer.Version1_6) + s.gatherLogs(s.settings.OperatorNamespace, "_before_master_upgrade") + s.upgradeToMaster() + + s.verifyOperatorImage(installer.VersionMaster) + s.verifyRookUpgrade(numOSDs) + err := s.installer.WaitForToolbox(s.namespace) + assert.NoError(s.T(), err) + + logger.Infof("Done with automatic upgrade from %s to master", installer.Version1_6) + newFile := "post-upgrade-1_6-to-master-file" + s.verifyFilesAfterUpgrade(filesystemName, newFile, message, rbdFilesToRead, cephfsFilesToRead) + rbdFilesToRead = append(rbdFilesToRead, newFile) + cephfsFilesToRead = append(cephfsFilesToRead, newFile) + + checkCephObjectUser(s.Suite, s.helper, s.k8sh, s.namespace, objectStoreName, objectUserID, true, false) + + // should be Bound after upgrade to Rook master + // do not need retry b/c the OBC controller runs parallel to Rook-Ceph orchestration + assert.True(s.T(), s.helper.BucketClient.CheckOBC(obcName, "bound")) + + logger.Infof("Verified upgrade from %s to master", installer.Version1_6) + + // + // Upgrade from nautilus to octopus + // + logger.Infof("*** UPGRADING CEPH FROM Nautilus TO Octopus ***") + s.gatherLogs(s.settings.OperatorNamespace, "_before_octopus_upgrade") + s.upgradeCephVersion(installer.OctopusVersion.Image, numOSDs) + // Verify reading and writing to the test clients + newFile = "post-octopus-upgrade-file" + s.verifyFilesAfterUpgrade(filesystemName, newFile, message, rbdFilesToRead, cephfsFilesToRead) + logger.Infof("Verified upgrade from nautilus to octopus") + + checkCephObjectUser(s.Suite, s.helper, s.k8sh, s.namespace, objectStoreName, objectUserID, true, false) + + // + // Upgrade from octopus to pacific + // + logger.Infof("*** UPGRADING CEPH FROM OCTOPUS TO PACIFIC ***") + s.gatherLogs(s.settings.OperatorNamespace, "_before_pacific_upgrade") + s.upgradeCephVersion(installer.PacificVersion.Image, numOSDs) + // Verify reading and writing to the test clients + newFile = "post-pacific-upgrade-file" + s.verifyFilesAfterUpgrade(filesystemName, newFile, message, rbdFilesToRead, cephfsFilesToRead) + logger.Infof("Verified upgrade from octopus to pacific") + + checkCephObjectUser(s.Suite, s.helper, s.k8sh, s.namespace, objectStoreName, objectUserID, true, false) +} + +func (s *UpgradeSuite) TestUpgradeToOctopusDevel() { + message := "my simple message" + objectStoreName := "upgraded-object" + objectUserID := "upgraded-user" + preFilename := "pre-upgrade-file" + s.settings.CephVersion = installer.OctopusVersion + numOSDs, filesystemName, rbdFilesToRead, cephfsFilesToRead := s.deployClusterforUpgrade(objectStoreName, objectUserID, message, preFilename) + + // + // Upgrade from octopus stable to octopus devel + // + logger.Infof("*** UPGRADING CEPH FROM OCTOPUS STABLE TO OCTOPUS DEVEL ***") + s.gatherLogs(s.settings.OperatorNamespace, "_before_octopus_devel_upgrade") + s.upgradeCephVersion(installer.OctopusDevelVersion.Image, numOSDs) + // Verify reading and writing to the test clients + newFile := "post-octopus-upgrade-file" + s.verifyFilesAfterUpgrade(filesystemName, newFile, message, rbdFilesToRead, cephfsFilesToRead) +
logger.Infof("Verified upgrade from octopus stable to octopus devel") + + checkCephObjectUser(s.Suite, s.helper, s.k8sh, s.namespace, objectStoreName, objectUserID, true, false) +} + +func (s *UpgradeSuite) TestUpgradeToPacificDevel() { + message := "my simple message" + objectStoreName := "upgraded-object" + objectUserID := "upgraded-user" + preFilename := "pre-upgrade-file" + s.settings.CephVersion = installer.PacificVersion + numOSDs, filesystemName, rbdFilesToRead, cephfsFilesToRead := s.deployClusterforUpgrade(objectStoreName, objectUserID, message, preFilename) + + // + // Upgrade from pacific stable to pacific devel + // + logger.Infof("*** UPGRADING CEPH FROM PACIFIC STABLE TO PACIFIC DEVEL ***") + s.gatherLogs(s.settings.OperatorNamespace, "_before_pacific_upgrade") + s.upgradeCephVersion(installer.PacificDevelVersion.Image, numOSDs) + // Verify reading and writing to the test clients + newFile := "post-pacific-upgrade-file" + s.verifyFilesAfterUpgrade(filesystemName, newFile, message, rbdFilesToRead, cephfsFilesToRead) + logger.Infof("Verified upgrade from pacific stable to pacific devel") + + checkCephObjectUser(s.Suite, s.helper, s.k8sh, s.namespace, objectStoreName, objectUserID, true, false) +} + +func (s *UpgradeSuite) deployClusterforUpgrade(objectStoreName, objectUserID, message, preFilename string) (int, string, []string, []string) { + // // Create block, object, and file storage before the upgrade // @@ -130,11 +235,9 @@ func (s *UpgradeSuite) TestUpgradeToMaster() { }() logger.Infof("Initializing object before the upgrade") - objectStoreName := "upgraded-object" runObjectE2ETestLite(s.helper, s.k8sh, s.Suite, s.settings, objectStoreName, 1, false) logger.Infof("Initializing object user before the upgrade") - objectUserID := "upgraded-user" createCephObjectUser(s.Suite, s.helper, s.k8sh, s.namespace, objectStoreName, objectUserID, false, false) logger.Info("Initializing object bucket claim before the upgrade") @@ -160,8 +263,6 @@ func (s *UpgradeSuite) TestUpgradeToMaster() { // verify that we're actually running the right pre-upgrade image s.verifyOperatorImage(installer.Version1_6) - message := "my simple message" - preFilename := "pre-upgrade-file" assert.NoError(s.T(), s.k8sh.WriteToPod("", rbdPodName, preFilename, message)) assert.NoError(s.T(), s.k8sh.ReadFromPod("", rbdPodName, preFilename, message)) @@ -176,57 +277,7 @@ func (s *UpgradeSuite) TestUpgradeToMaster() { numOSDs := len(osdDeps) // there should be this many upgraded OSDs require.NotEqual(s.T(), 0, numOSDs) - // - // Upgrade Rook from v1.6 to master - // - logger.Infof("*** UPGRADING ROOK FROM %s to master ***", installer.Version1_6) - s.gatherLogs(s.settings.OperatorNamespace, "_before_master_upgrade") - s.upgradeToMaster() - - s.verifyOperatorImage(installer.VersionMaster) - s.verifyRookUpgrade(numOSDs) - err = s.installer.WaitForToolbox(s.namespace) - assert.NoError(s.T(), err) - - logger.Infof("Done with automatic upgrade from %s to master", installer.Version1_6) - newFile := "post-upgrade-1_6-to-master-file" - s.verifyFilesAfterUpgrade(filesystemName, newFile, message, rbdFilesToRead, cephfsFilesToRead) - rbdFilesToRead = append(rbdFilesToRead, newFile) - cephfsFilesToRead = append(cephfsFilesToRead, newFile) - - checkCephObjectUser(s.Suite, s.helper, s.k8sh, s.namespace, objectStoreName, objectUserID, true, false) - - // should be Bound after upgrade to Rook master - // do not need retry b/c the OBC controller runs parallel to Rook-Ceph orchestration - assert.True(s.T(), s.helper.BucketClient.CheckOBC(obcName,
"bound")) - - logger.Infof("Verified upgrade from %s to master", installer.Version1_6) - - // - // Upgrade from nautilus to octopus - // - logger.Infof("*** UPGRADING CEPH FROM Nautilus TO Octopus ***") - s.gatherLogs(s.settings.OperatorNamespace, "_before_octopus_upgrade") - s.upgradeCephVersion(installer.OctopusVersion.Image, numOSDs) - // Verify reading and writing to the test clients - newFile = "post-octopus-upgrade-file" - s.verifyFilesAfterUpgrade(filesystemName, newFile, message, rbdFilesToRead, cephfsFilesToRead) - logger.Infof("Verified upgrade from nautilus to octopus") - - checkCephObjectUser(s.Suite, s.helper, s.k8sh, s.namespace, objectStoreName, objectUserID, true, false) - - // - // Upgrade from octopus to pacific - // - logger.Infof("*** UPGRADING CEPH FROM OCTOPUS TO PACIFIC ***") - s.gatherLogs(s.settings.OperatorNamespace, "_before_pacific_upgrade") - s.upgradeCephVersion(installer.PacificVersion.Image, numOSDs) - // Verify reading and writing to the test clients - newFile = "post-pacific-upgrade-file" - s.verifyFilesAfterUpgrade(filesystemName, newFile, message, rbdFilesToRead, cephfsFilesToRead) - logger.Infof("Verified upgrade from octopus to pacific") - - checkCephObjectUser(s.Suite, s.helper, s.k8sh, s.namespace, objectStoreName, objectUserID, true, false) + return numOSDs, filesystemName, rbdFilesToRead, cephfsFilesToRead } func (s *UpgradeSuite) gatherLogs(systemNamespace, testSuffix string) {