Merge pull request rook#8743 from leseb/next-pacific
ceph: use next ceph v16.2.6 pacific version
leseb committed Sep 21, 2021
2 parents 404dccd + c1a88f3 commit fea9f35
Showing 188 changed files with 1,333 additions and 1,348 deletions.
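
The commit title points at a bump of the examples to the Ceph v16.2.6 (Pacific) image; the two hunks shown below are only the CI and lint configuration that changed alongside it. As a rough sketch of what the version bump itself looks like in practice, a change of this kind can be applied across the example manifests with something like the following (the quay.io/ceph/ceph image name and the v16.2.5 starting tag are assumptions based on the usual Rook conventions, not taken from this diff):

    # Hypothetical sketch: bump the Ceph image tag referenced by the example
    # manifests. The old tag (v16.2.5) and the registry path are assumptions.
    grep -rl "quay.io/ceph/ceph:v16.2.5" deploy/examples/ \
      | xargs sed -i "s|quay.io/ceph/ceph:v16.2.5|quay.io/ceph/ceph:v16.2.6|g"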
8 changes: 4 additions & 4 deletions .github/workflows/.yamllint
@@ -1,9 +1,9 @@
 extends: default
 ignore: |
-  cluster/examples/kubernetes/ceph/csi/template
-  cluster/examples/kubernetes/ceph/crds.yaml
-  cluster/examples/kubernetes/ceph/pre-k8s-1.16/crds.yaml
-  cluster/examples/kubernetes/ceph/monitoring/
+  deploy/examples/csi/template
+  deploy/examples/crds.yaml
+  deploy/examples/pre-k8s-1.16/crds.yaml
+  deploy/examples/monitoring/
 rules:
   line-length: disable
   new-lines: disable
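
The yamllint ignore list simply tracks the repository reorganization: the example manifests that used to live under cluster/examples/kubernetes/ceph now live under deploy/examples, and those paths stay excluded from linting. A hedged example of running the linter locally with this config (assuming yamllint is installed; the target path is illustrative):

    # Lint the workflow files with the repo's yamllint config; paths listed
    # under "ignore" in the config are skipped automatically.
    yamllint -c .github/workflows/.yamllint .github/workflows/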
108 changes: 54 additions & 54 deletions .github/workflows/canary-integration-test-arm64.yml
@@ -1,7 +1,7 @@
 name: Canary integration tests ARM64
 on:
   schedule:
-  - cron: '0 0 * * *' # every day at midnight
+    - cron: "0 0 * * *" # every day at midnight

 defaults:
   run:
@@ -16,67 +16,67 @@ jobs:
       BLOCK: /dev/sdb

     steps:
-    - name: checkout
-      uses: actions/checkout@v2
-      with:
-        fetch-depth: 0
+      - name: checkout
+        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0

-    - name: setup golang
-      uses: actions/setup-go@v2
-      with:
-        go-version: 1.16
+      - name: setup golang
+        uses: actions/setup-go@v2
+        with:
+          go-version: 1.16

-    - name: teardown minikube and docker
-      run: |
-        uptime
-        minikube delete
-        docker system prune -a
+      - name: teardown minikube and docker
+        run: |
+          uptime
+          minikube delete
+          docker system prune -a

-    - name: setup minikube
-      run: |
-        # sudo apt-get install build-essential -y
-        # curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-linux-arm64
-        # sudo install minikube-linux-arm64 /usr/local/bin/minikube
-        # sudo rm -f minikube-linux-arm64
-        # curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl"
-        # sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
-        minikube start --memory 28g --cpus=12 --driver=docker
+      - name: setup minikube
+        run: |
+          # sudo apt-get install build-essential -y
+          # curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-linux-arm64
+          # sudo install minikube-linux-arm64 /usr/local/bin/minikube
+          # sudo rm -f minikube-linux-arm64
+          # curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl"
+          # sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
+          minikube start --memory 28g --cpus=12 --driver=docker

-    - name: print k8s cluster status
-      run: tests/scripts/github-action-helper.sh print_k8s_cluster_status
+      - name: print k8s cluster status
+        run: tests/scripts/github-action-helper.sh print_k8s_cluster_status

-    - name: use local disk and create partitions for osds
-      run: |
-        tests/scripts/github-action-helper.sh use_local_disk
-        tests/scripts/create-bluestore-partitions.sh --disk "$BLOCK" --osd-count 1
+      - name: use local disk and create partitions for osds
+        run: |
+          tests/scripts/github-action-helper.sh use_local_disk
+          tests/scripts/create-bluestore-partitions.sh --disk "$BLOCK" --osd-count 1

-    - name: validate-yaml
-      run: tests/scripts/github-action-helper.sh validate_yaml
+      - name: validate-yaml
+        run: tests/scripts/github-action-helper.sh validate_yaml

-    - name: deploy cluster
-      run: |
-        # removing liveness probes since the env is slow and the probe is killing the daemons
-        yq write -d1 -i cluster/examples/kubernetes/ceph/cluster-test.yaml "spec.healthCheck.livenessProbe.mon.disabled" true
-        yq write -d1 -i cluster/examples/kubernetes/ceph/cluster-test.yaml "spec.healthCheck.livenessProbe.mgr.disabled" true
-        yq write -d1 -i cluster/examples/kubernetes/ceph/cluster-test.yaml "spec.healthCheck.livenessProbe.osd.disabled" true
-        tests/scripts/github-action-helper.sh deploy_cluster
-        # there are no package for arm64 nfs-ganesha
-        kubectl delete -f cluster/examples/kubernetes/ceph/nfs-test.yaml
+      - name: deploy cluster
+        run: |
+          # removing liveness probes since the env is slow and the probe is killing the daemons
+          yq write -d1 -i deploy/examples/cluster-test.yaml "spec.healthCheck.livenessProbe.mon.disabled" true
+          yq write -d1 -i deploy/examples/cluster-test.yaml "spec.healthCheck.livenessProbe.mgr.disabled" true
+          yq write -d1 -i deploy/examples/cluster-test.yaml "spec.healthCheck.livenessProbe.osd.disabled" true
+          tests/scripts/github-action-helper.sh deploy_cluster
+          # there are no package for arm64 nfs-ganesha
+          kubectl delete -f deploy/examples/nfs-test.yaml

-    - name: wait for prepare pod
-      run: timeout 900 sh -c 'until kubectl -n rook-ceph logs -f $(kubectl -n rook-ceph get pod -l app=rook-ceph-osd-prepare -o jsonpath='{.items[*].metadata.name}'); do sleep 5; done' || kubectl -n rook-ceph get all && kubectl logs -n rook-ceph deploy/rook-ceph-operator
+      - name: wait for prepare pod
+        run: timeout 900 sh -c 'until kubectl -n rook-ceph logs -f $(kubectl -n rook-ceph get pod -l app=rook-ceph-osd-prepare -o jsonpath='{.items[*].metadata.name}'); do sleep 5; done' || kubectl -n rook-ceph get all && kubectl logs -n rook-ceph deploy/rook-ceph-operator

-    - name: wait for ceph to be ready
-      run: tests/scripts/github-action-helper.sh wait_for_ceph_to_be_ready all 1
+      - name: wait for ceph to be ready
+        run: tests/scripts/github-action-helper.sh wait_for_ceph_to_be_ready all 1

-    - name: teardown minikube and docker
-      run: |
-        minikube delete
-        docker system prune -a
+      - name: teardown minikube and docker
+        run: |
+          minikube delete
+          docker system prune -a

-    - name: upload canary test result
-      uses: actions/upload-artifact@v2
-      if: always()
-      with:
-        name: canary-arm64
-        path: test
+      - name: upload canary test result
+        uses: actions/upload-artifact@v2
+        if: always()
+        with:
+          name: canary-arm64
+          path: test
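
For reference, the last stretch of this workflow gates on the OSD prepare job finishing and on the helper script reporting Ceph as ready. A hedged sketch of checking the same things by hand against a running test cluster (resource names are taken from the manifests above; the commands assume a standard Rook install in the rook-ceph namespace):

    # Watch the OSD prepare job and the operator, mirroring the
    # "wait for prepare pod" step in the workflow above.
    kubectl -n rook-ceph get pods -l app=rook-ceph-osd-prepare
    kubectl -n rook-ceph logs deploy/rook-ceph-operator --tail=50
    # The CephCluster status summarizes overall health once the cluster is up.
    kubectl -n rook-ceph get cephcluster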
