From c890710b635e2cd2ae64c08bac0621d89e79c0fc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Han?=
Date: Wed, 22 Sep 2021 10:37:07 +0200
Subject: [PATCH] core: change directory layout
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

As per discussion, this proposes a new layout for the charts, example
YAML, and OLM files:

./deploy
├── charts
│   ├── rook-ceph
│   │   └── templates
│   └── rook-ceph-cluster
│       └── templates
├── examples
│   ├── csi
│   │   ├── cephfs
│   │   └── rbd
│   ├── flex
│   ├── monitoring
│   └── pre-k8s-1.16
└── olm
    └── assemble

Signed-off-by: Sébastien Han
---
 .github/workflows/.yamllint | 6 +-
 .github/workflows/canary-integration-test.yml | 66 +++++++++----------
 .github/workflows/daily-nightly-jobs.yml | 8 +--
 .github/workflows/helm-lint.yaml | 2 +-
 .../workflows/rgw-multisite-test/action.yml | 8 +--
 .github/workflows/yaml-lint.yaml | 2 +-
 .gitignore | 4 +-
 Documentation/admission-controller-usage.md | 5 +-
 Documentation/authenticated-registry.md | 4 +-
 Documentation/ceph-advanced-configuration.md | 4 +-
 Documentation/ceph-block.md | 13 ++--
 Documentation/ceph-cluster-crd.md | 14 ++--
 Documentation/ceph-csi-drivers.md | 2 +-
 Documentation/ceph-csi-snapshot.md | 37 +++++------
 Documentation/ceph-csi-volume-clone.md | 12 ++--
 Documentation/ceph-examples.md | 20 +++---
 Documentation/ceph-filesystem-crd.md | 8 +--
 Documentation/ceph-filesystem.md | 4 +-
 Documentation/ceph-fs-mirror-crd.md | 2 +-
 Documentation/ceph-monitoring.md | 6 +-
 Documentation/ceph-object-multisite.md | 6 +-
 Documentation/ceph-openshift-issues.md | 2 +-
 Documentation/ceph-openshift.md | 4 +-
 Documentation/ceph-osd-mgmt.md | 3 +-
 Documentation/ceph-rbd-mirror-crd.md | 2 +-
 Documentation/ceph-toolbox.md | 4 +-
 Documentation/ceph-upgrade.md | 8 +--
 Documentation/direct-tools.md | 2 +-
 Documentation/helm-ceph-cluster.md | 4 +-
 Documentation/helm-operator.md | 4 +-
 Documentation/helm.md | 2 +-
 Documentation/pod-security-policies.md | 4 +-
 Documentation/quickstart.md | 17 ++---
 Documentation/rbd-mirroring.md | 6 +-
 PendingReleaseNotes.md | 3 +
 build/crds/build-crds.sh | 10 +--
 build/makelib/helm.mk | 2 +-
 build/rbac/get-helm-rbac.sh | 2 +-
 cluster/olm/ceph/README.md | 24 -------
 {cluster => deploy}/charts/library/Chart.yaml | 0
 .../templates/_cluster-clusterrolebinding.tpl | 0
 .../library/templates/_cluster-monitoring.tpl | 0
 .../charts/library/templates/_cluster-psp.tpl | 0
 .../library/templates/_cluster-role.tpl | 0
 .../templates/_cluster-rolebinding.tpl | 0
 .../templates/_cluster-serviceaccount.tpl | 0
 .../library/templates/_imagepullsecret.tpl | 0
 .../templates/_suffix-cluster-namespace.tpl | 0
 .../charts/rook-ceph-cluster/.helmignore | 0
 .../charts/rook-ceph-cluster/Chart.yaml | 0
 .../charts/rook-ceph-cluster/README.md | 0
 .../charts/rook-ceph-cluster/charts/library | 0
 .../rook-ceph-cluster/templates/NOTES.txt | 0
 .../rook-ceph-cluster/templates/_helpers.tpl | 0
 .../templates/cephblockpool.yaml | 0
 .../templates/cephcluster.yaml | 0
 .../templates/cephfilesystem.yaml | 0
 .../templates/cephobjectstore.yaml | 0
 .../templates/configmap.yaml | 0
 .../templates/deployment.yaml | 0
 .../rook-ceph-cluster/templates/ingress.yaml | 0
 .../rook-ceph-cluster/templates/rbac.yaml | 0
 .../charts/rook-ceph-cluster/values.yaml | 0
 .../charts/rook-ceph/.helmignore | 0
 .../charts/rook-ceph/Chart.yaml | 0
 .../charts/rook-ceph/README.md | 0
 .../charts/rook-ceph/charts/library | 0
 .../charts/rook-ceph/templates/NOTES.txt | 0
 .../rook-ceph/templates/cluster-rbac.yaml | 0
 .../rook-ceph/templates/clusterrole.yaml | 0
 .../templates/clusterrolebinding.yaml | 0
 .../rook-ceph/templates/deployment.yaml | 0
 .../charts/rook-ceph/templates/psp.yaml | 0
 .../charts/rook-ceph/templates/resources.yaml | 0
 .../charts/rook-ceph/templates/role.yaml | 0
 .../rook-ceph/templates/rolebinding.yaml | 0
 .../rook-ceph/templates/serviceaccount.yaml | 0
 .../charts/rook-ceph/values.yaml | 2 +-
 .../kubernetes => deploy/examples}/README.md | 0
 .../bucket-notification-endpoint.yaml | 0
 .../examples}/bucket-notification.yaml | 0
 .../examples}/bucket-topic.yaml | 0
 .../ceph => deploy/examples}/ceph-client.yaml | 0
 .../cluster-external-management.yaml | 0
 .../examples}/cluster-external.yaml | 0
 .../examples}/cluster-on-local-pvc.yaml | 0
 .../examples}/cluster-on-pvc.yaml | 0
 .../examples}/cluster-stretched-aws.yaml | 0
 .../examples}/cluster-stretched.yaml | 0
 .../examples}/cluster-test.yaml | 0
 .../ceph => deploy/examples}/cluster.yaml | 0
 .../examples}/common-external.yaml | 0
 .../examples}/common-second-cluster.yaml | 0
 .../ceph => deploy/examples}/common.yaml | 0
 .../ceph => deploy/examples}/crds.yaml | 0
 .../create-external-cluster-resources.py | 0
 .../create-external-cluster-resources.sh | 6 +-
 .../examples}/csi/cephfs/kube-registry.yaml | 0
 .../examples}/csi/cephfs/pod-ephemeral.yaml | 0
 .../examples}/csi/cephfs/pod.yaml | 0
 .../examples}/csi/cephfs/pvc-clone.yaml | 0
 .../examples}/csi/cephfs/pvc-restore.yaml | 0
 .../examples}/csi/cephfs/pvc.yaml | 0
 .../examples}/csi/cephfs/snapshot.yaml | 0
 .../examples}/csi/cephfs/snapshotclass.yaml | 0
 .../examples}/csi/cephfs/storageclass-ec.yaml | 0
 .../examples}/csi/cephfs/storageclass.yaml | 0
 .../examples}/csi/rbd/pod-ephemeral.yaml | 0
 .../ceph => deploy/examples}/csi/rbd/pod.yaml | 0
 .../examples}/csi/rbd/pvc-clone.yaml | 0
 .../examples}/csi/rbd/pvc-restore.yaml | 0
 .../ceph => deploy/examples}/csi/rbd/pvc.yaml | 0
 .../examples}/csi/rbd/snapshot.yaml | 0
 .../examples}/csi/rbd/snapshotclass.yaml | 0
 .../examples}/csi/rbd/storageclass-ec.yaml | 0
 .../examples}/csi/rbd/storageclass-test.yaml | 0
 .../examples}/csi/rbd/storageclass.yaml | 0
 .../examples}/dashboard-external-http.yaml | 0
 .../examples}/dashboard-external-https.yaml | 0
 .../examples}/dashboard-ingress-https.yaml | 0
 .../examples}/dashboard-loadbalancer.yaml | 0
 .../examples}/direct-mount.yaml | 0
 .../examples}/filesystem-ec.yaml | 0
 .../examples}/filesystem-mirror.yaml | 0
 .../examples}/filesystem-test.yaml | 0
 .../ceph => deploy/examples}/filesystem.yaml | 0
 .../ceph => deploy/examples}/images.txt | 0
 .../examples}/import-external-cluster.sh | 0
 .../csi-metrics-service-monitor.yaml | 0
 .../examples}/monitoring/keda-rgw.yaml | 0
 .../prometheus-ceph-v14-rules-external.yaml | 0
 .../monitoring/prometheus-ceph-v14-rules.yaml | 0
 .../prometheus-ceph-v15-rules-external.yaml | 0
 .../monitoring/prometheus-ceph-v15-rules.yaml | 0
 .../prometheus-ceph-v16-rules-external.yaml | 0
 .../monitoring/prometheus-ceph-v16-rules.yaml | 0
 .../monitoring/prometheus-service.yaml | 0
 .../examples}/monitoring/prometheus.yaml | 0
 .../examples}/monitoring/rbac.yaml | 0
 .../examples}/monitoring/service-monitor.yaml | 0
 .../kubernetes => deploy/examples}/mysql.yaml | 0
 .../ceph => deploy/examples}/nfs-test.yaml | 0
 .../ceph => deploy/examples}/nfs.yaml | 0
 .../examples}/object-bucket-claim-delete.yaml | 0
 .../object-bucket-claim-notification.yaml | 0
 .../examples}/object-bucket-claim-retain.yaml | 0
 .../ceph => deploy/examples}/object-ec.yaml | 0
 .../examples}/object-external.yaml | 0
 .../object-multisite-pull-realm-test.yaml | 0
 .../object-multisite-pull-realm.yaml | 0
 .../examples}/object-multisite-test.yaml | 0
 .../examples}/object-multisite.yaml | 0
 .../examples}/object-openshift.yaml | 0
 .../ceph => deploy/examples}/object-test.yaml | 0
 .../ceph => deploy/examples}/object-user.yaml | 0
 .../ceph => deploy/examples}/object.yaml | 0
 .../examples}/operator-openshift.yaml | 0
 .../ceph => deploy/examples}/operator.yaml | 0
 .../ceph => deploy/examples}/osd-purge.yaml | 0
 .../ceph => deploy/examples}/pool-ec.yaml | 0
 .../examples}/pool-mirrored.yaml | 0
 .../ceph => deploy/examples}/pool-test.yaml | 0
 .../ceph => deploy/examples}/pool.yaml | 0
 .../ceph => deploy/examples}/rbdmirror.yaml | 0
 .../examples}/rgw-external.yaml | 0
 .../examples}/storageclass-bucket-delete.yaml | 0
 .../examples}/storageclass-bucket-retain.yaml | 0
 .../ceph => deploy/examples}/toolbox-job.yaml | 0
 .../ceph => deploy/examples}/toolbox.yaml | 0
 .../examples}/volume-replication-class.yaml | 0
 .../examples}/volume-replication.yaml | 0
 .../examples}/wordpress.yaml | 0
 deploy/olm/README.md | 24 +++++++
 .../olm}/assemble/metadata-common.yaml | 0
 .../olm}/assemble/metadata-k8s.yaml | 0
 .../olm}/assemble/metadata-ocp.yaml | 0
 .../olm}/assemble/metadata-okd.yaml | 0
 .../objectbucket.io_objectbucketclaims.yaml | 0
 .../objectbucket.io_objectbuckets.yaml | 0
 .../olm}/assemble/rook-ceph.package.yaml | 0
 .../olm}/generate-rook-csv-templates.sh | 4 +-
 .../ceph => deploy/olm}/generate-rook-csv.sh | 11 ++--
 design/ceph/ceph-nfs-ganesha.md | 3 +-
 design/ceph/filesystem.md | 2 +-
 design/ceph/object/store.md | 2 +-
 design/common/object-bucket.md | 21 +++--
 images/ceph/Makefile | 20 +++--
 pkg/operator/k8sutil/prometheus_test.go | 4 +-
 .../ceph/test-data => tests}/ceph-status-out | 0
 tests/framework/installer/ceph_settings.go | 4 +-
 tests/framework/installer/settings.go | 19 ++++-
 tests/framework/utils/helm_helper.go | 5 +-
 tests/scripts/auto-grow-storage.sh | 48 +++++++-----
 tests/scripts/github-action-helper.sh | 19 +++--
 tests/scripts/multi-node/build-rook.sh | 2 +-
 195 files changed, 275 insertions(+), 257 deletions(-)
 delete mode 100644 cluster/olm/ceph/README.md
 rename {cluster => deploy}/charts/library/Chart.yaml (100%)
 rename {cluster => deploy}/charts/library/templates/_cluster-clusterrolebinding.tpl (100%)
 rename {cluster => deploy}/charts/library/templates/_cluster-monitoring.tpl (100%)
 rename {cluster => deploy}/charts/library/templates/_cluster-psp.tpl (100%)
 rename {cluster => deploy}/charts/library/templates/_cluster-role.tpl (100%)
 rename {cluster => deploy}/charts/library/templates/_cluster-rolebinding.tpl (100%)
 rename {cluster => deploy}/charts/library/templates/_cluster-serviceaccount.tpl (100%)
 rename {cluster => deploy}/charts/library/templates/_imagepullsecret.tpl (100%)
 rename {cluster => deploy}/charts/library/templates/_suffix-cluster-namespace.tpl (100%)
 rename {cluster => deploy}/charts/rook-ceph-cluster/.helmignore (100%)
 rename {cluster => deploy}/charts/rook-ceph-cluster/Chart.yaml (100%)
 rename {cluster => deploy}/charts/rook-ceph-cluster/README.md (100%)
 rename {cluster => deploy}/charts/rook-ceph-cluster/charts/library (100%)
 rename {cluster => deploy}/charts/rook-ceph-cluster/templates/NOTES.txt (100%)
 rename {cluster => deploy}/charts/rook-ceph-cluster/templates/_helpers.tpl (100%)
 rename {cluster => deploy}/charts/rook-ceph-cluster/templates/cephblockpool.yaml (100%)
 rename {cluster => deploy}/charts/rook-ceph-cluster/templates/cephcluster.yaml (100%)
 rename {cluster => deploy}/charts/rook-ceph-cluster/templates/cephfilesystem.yaml (100%)
 rename {cluster => deploy}/charts/rook-ceph-cluster/templates/cephobjectstore.yaml (100%)
 rename {cluster => deploy}/charts/rook-ceph-cluster/templates/configmap.yaml (100%)
 rename {cluster => deploy}/charts/rook-ceph-cluster/templates/deployment.yaml (100%)
 rename {cluster => deploy}/charts/rook-ceph-cluster/templates/ingress.yaml (100%)
 rename {cluster => deploy}/charts/rook-ceph-cluster/templates/rbac.yaml (100%)
 rename {cluster => deploy}/charts/rook-ceph-cluster/values.yaml (100%)
 rename {cluster => deploy}/charts/rook-ceph/.helmignore (100%)
 rename {cluster => deploy}/charts/rook-ceph/Chart.yaml (100%)
 rename {cluster => deploy}/charts/rook-ceph/README.md (100%)
 rename {cluster => deploy}/charts/rook-ceph/charts/library (100%)
 rename {cluster => deploy}/charts/rook-ceph/templates/NOTES.txt (100%)
 rename {cluster => deploy}/charts/rook-ceph/templates/cluster-rbac.yaml (100%)
 rename {cluster => deploy}/charts/rook-ceph/templates/clusterrole.yaml (100%)
 rename {cluster => deploy}/charts/rook-ceph/templates/clusterrolebinding.yaml (100%)
 rename {cluster => deploy}/charts/rook-ceph/templates/deployment.yaml (100%)
 rename {cluster => deploy}/charts/rook-ceph/templates/psp.yaml (100%)
 rename {cluster => deploy}/charts/rook-ceph/templates/resources.yaml (100%)
 rename {cluster => deploy}/charts/rook-ceph/templates/role.yaml (100%)
 rename {cluster => deploy}/charts/rook-ceph/templates/rolebinding.yaml (100%)
 rename {cluster => deploy}/charts/rook-ceph/templates/serviceaccount.yaml (100%)
 rename {cluster => deploy}/charts/rook-ceph/values.yaml (99%)
 rename {cluster/examples/kubernetes => deploy/examples}/README.md (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/bucket-notification-endpoint.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/bucket-notification.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/bucket-topic.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/ceph-client.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/cluster-external-management.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/cluster-external.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/cluster-on-local-pvc.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/cluster-on-pvc.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/cluster-stretched-aws.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/cluster-stretched.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/cluster-test.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/cluster.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/common-external.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/common-second-cluster.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/common.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/crds.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/create-external-cluster-resources.py (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/create-external-cluster-resources.sh (97%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/csi/cephfs/kube-registry.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/csi/cephfs/pod-ephemeral.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/csi/cephfs/pod.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/csi/cephfs/pvc-clone.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/csi/cephfs/pvc-restore.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/csi/cephfs/pvc.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/csi/cephfs/snapshot.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/csi/cephfs/snapshotclass.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/csi/cephfs/storageclass-ec.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/csi/cephfs/storageclass.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/csi/rbd/pod-ephemeral.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/csi/rbd/pod.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/csi/rbd/pvc-clone.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/csi/rbd/pvc-restore.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/csi/rbd/pvc.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/csi/rbd/snapshot.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/csi/rbd/snapshotclass.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/csi/rbd/storageclass-ec.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/csi/rbd/storageclass-test.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/csi/rbd/storageclass.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/dashboard-external-http.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/dashboard-external-https.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/dashboard-ingress-https.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/dashboard-loadbalancer.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/direct-mount.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/filesystem-ec.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/filesystem-mirror.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/filesystem-test.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/filesystem.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/images.txt (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/import-external-cluster.sh (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/monitoring/csi-metrics-service-monitor.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/monitoring/keda-rgw.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/monitoring/prometheus-ceph-v14-rules-external.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/monitoring/prometheus-ceph-v14-rules.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/monitoring/prometheus-ceph-v15-rules-external.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/monitoring/prometheus-ceph-v15-rules.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/monitoring/prometheus-ceph-v16-rules-external.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/monitoring/prometheus-ceph-v16-rules.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/monitoring/prometheus-service.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/monitoring/prometheus.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/monitoring/rbac.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/monitoring/service-monitor.yaml (100%)
 rename {cluster/examples/kubernetes => deploy/examples}/mysql.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/nfs-test.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/nfs.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/object-bucket-claim-delete.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/object-bucket-claim-notification.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/object-bucket-claim-retain.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/object-ec.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/object-external.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/object-multisite-pull-realm-test.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/object-multisite-pull-realm.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/object-multisite-test.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/object-multisite.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/object-openshift.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/object-test.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/object-user.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/object.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/operator-openshift.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/operator.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/osd-purge.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/pool-ec.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/pool-mirrored.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/pool-test.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/pool.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/rbdmirror.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/rgw-external.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/storageclass-bucket-delete.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/storageclass-bucket-retain.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/toolbox-job.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/toolbox.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/volume-replication-class.yaml (100%)
 rename {cluster/examples/kubernetes/ceph => deploy/examples}/volume-replication.yaml (100%)
 rename {cluster/examples/kubernetes => deploy/examples}/wordpress.yaml (100%)
 create mode 100644 deploy/olm/README.md
 rename {cluster/olm/ceph => deploy/olm}/assemble/metadata-common.yaml (100%)
 rename {cluster/olm/ceph => deploy/olm}/assemble/metadata-k8s.yaml (100%)
 rename {cluster/olm/ceph => deploy/olm}/assemble/metadata-ocp.yaml (100%)
 rename {cluster/olm/ceph => deploy/olm}/assemble/metadata-okd.yaml (100%)
 rename {cluster/olm/ceph => deploy/olm}/assemble/objectbucket.io_objectbucketclaims.yaml (100%)
 rename {cluster/olm/ceph => deploy/olm}/assemble/objectbucket.io_objectbuckets.yaml (100%)
 rename {cluster/olm/ceph => deploy/olm}/assemble/rook-ceph.package.yaml (100%)
 rename {cluster/olm/ceph => deploy/olm}/generate-rook-csv-templates.sh (90%)
 rename {cluster/olm/ceph => deploy/olm}/generate-rook-csv.sh (97%)
 rename {cluster/examples/kubernetes/ceph/test-data => tests}/ceph-status-out (100%)

diff --git a/.github/workflows/.yamllint b/.github/workflows/.yamllint
index 96a8081d7b61..44fa3ac3fc2c 100644
--- a/.github/workflows/.yamllint
+++ b/.github/workflows/.yamllint
@@ -1,8 +1,8 @@
 extends: default
 ignore: |
-  cluster/examples/kubernetes/ceph/csi/template
-  cluster/examples/kubernetes/ceph/crds.yaml
-  cluster/examples/kubernetes/ceph/monitoring/
+  deploy/examples/csi/template
+  deploy/examples/crds.yaml
+  deploy/examples/monitoring/
 rules:
   line-length: disable
   new-lines: disable
diff --git a/.github/workflows/canary-integration-test.yml b/.github/workflows/canary-integration-test.yml
index 5811ead6635a..a122002a1a61 100644
--- a/.github/workflows/canary-integration-test.yml
+++ b/.github/workflows/canary-integration-test.yml
@@ -55,8 +55,8 @@ jobs:
          mgr_raw=$(kubectl -n rook-ceph exec $toolbox -- ceph mgr dump -f json|jq --raw-output .active_addr)
          timeout 60 sh -c "until kubectl -n rook-ceph exec $toolbox -- curl --silent --show-error ${mgr_raw%%:*}:9283; do echo 'waiting for mgr prometheus exporter to be ready' && sleep 1; done"
          kubectl -n rook-ceph exec $toolbox -- mkdir -p /etc/ceph/test-data
-         kubectl -n rook-ceph cp cluster/examples/kubernetes/ceph/test-data/ceph-status-out $toolbox:/etc/ceph/test-data/
-         kubectl -n rook-ceph cp cluster/examples/kubernetes/ceph/create-external-cluster-resources.py $toolbox:/etc/ceph
+         kubectl -n rook-ceph cp tests/ceph-status-out $toolbox:/etc/ceph/test-data/
+         kubectl -n rook-ceph cp deploy/examples/create-external-cluster-resources.py $toolbox:/etc/ceph
          timeout 10 sh -c "until kubectl -n rook-ceph exec $toolbox -- python3 /etc/ceph/create-external-cluster-resources.py --rbd-data-pool-name replicapool; do echo 'waiting for script to succeed' && sleep 1; done"

       - name: run external script create-external-cluster-resources.py unit tests
@@ -73,11 +73,11 @@ jobs:
         run: |
           kubectl -n rook-ceph delete deploy/rook-ceph-operator
           kubectl -n rook-ceph delete deploy/rook-ceph-osd-1 --grace-period=0 --force
-          sed -i 's/<OSD-IDs>/1/' cluster/examples/kubernetes/ceph/osd-purge.yaml
+          sed -i 's/<OSD-IDs>/1/' deploy/examples/osd-purge.yaml
           # the CI must force the deletion since we use replica 1 on 2 OSDs
-          sed -i 's/false/true/' cluster/examples/kubernetes/ceph/osd-purge.yaml
-          sed -i 's|rook/ceph:master|rook/ceph:local-build|' cluster/examples/kubernetes/ceph/osd-purge.yaml
-          kubectl -n rook-ceph create -f cluster/examples/kubernetes/ceph/osd-purge.yaml
+          sed -i 's/false/true/' deploy/examples/osd-purge.yaml
+          sed -i 's|rook/ceph:master|rook/ceph:local-build|' deploy/examples/osd-purge.yaml
+          kubectl -n rook-ceph create -f deploy/examples/osd-purge.yaml
           toolbox=$(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[*].metadata.name}')
           kubectl -n rook-ceph exec $toolbox -- ceph status
           timeout 120 sh -c "until kubectl -n rook-ceph exec $toolbox -- ceph osd tree|grep -qE 'osd.1.*.destroyed'; do echo 'waiting for ceph osd 1 to be destroyed'; sleep 1; done"
@@ -122,12 +122,12 @@ jobs:
       - name: deploy
cluster run: | - tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build deploy/examples/operator.yaml yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].encrypted" false yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].count" 2 yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].volumeClaimTemplates[0].spec.resources.requests.storage" 6Gi kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml - tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build deploy/examples/toolbox.yaml - name: wait for prepare pod run: tests/scripts/github-action-helper.sh wait_for_prepare_pod @@ -174,11 +174,11 @@ jobs: - name: deploy cluster run: | - tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build deploy/examples/operator.yaml yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].encrypted" false cat tests/manifests/test-on-pvc-db.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml - tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build deploy/examples/toolbox.yaml - name: wait for prepare pod run: tests/scripts/github-action-helper.sh wait_for_prepare_pod @@ -222,12 +222,12 @@ jobs: - name: deploy rook run: | - tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build deploy/examples/operator.yaml yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].encrypted" false cat tests/manifests/test-on-pvc-db.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml cat tests/manifests/test-on-pvc-wal.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml - tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build deploy/examples/toolbox.yaml - name: wait for prepare pod run: tests/scripts/github-action-helper.sh wait_for_prepare_pod @@ -274,11 +274,11 @@ jobs: - name: deploy cluster run: | - tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build deploy/examples/operator.yaml yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].count" 2 yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].volumeClaimTemplates[0].spec.resources.requests.storage" 6Gi kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml - tests/scripts/github-action-helper.sh 
deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build deploy/examples/toolbox.yaml - name: wait for prepare pod run: tests/scripts/github-action-helper.sh wait_for_prepare_pod @@ -325,10 +325,10 @@ jobs: - name: deploy cluster run: | - tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build deploy/examples/operator.yaml cat tests/manifests/test-on-pvc-db.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml - tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build deploy/examples/toolbox.yaml - name: wait for prepare pod run: tests/scripts/github-action-helper.sh wait_for_prepare_pod @@ -375,11 +375,11 @@ jobs: - name: deploy rook run: | - tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build deploy/examples/operator.yaml cat tests/manifests/test-on-pvc-db.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml cat tests/manifests/test-on-pvc-wal.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml - tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build deploy/examples/toolbox.yaml - name: wait for prepare pod run: tests/scripts/github-action-helper.sh wait_for_prepare_pod @@ -430,7 +430,7 @@ jobs: - name: deploy cluster run: | - tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build deploy/examples/operator.yaml cat tests/manifests/test-kms-vault.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml yq merge --inplace --arrays append tests/manifests/test-cluster-on-pvc-encrypted.yaml tests/manifests/test-kms-vault-spec-token-auth.yaml yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].count" 2 @@ -439,7 +439,7 @@ jobs: yq merge --inplace --arrays append tests/manifests/test-object.yaml tests/manifests/test-kms-vault-spec-token-auth.yaml yq write -i tests/manifests/test-object.yaml "spec.security.kms.connectionDetails.VAULT_BACKEND_PATH" rook/ver2 kubectl create -f tests/manifests/test-object.yaml - tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build deploy/examples/toolbox.yaml - name: wait for prepare pod run: tests/scripts/github-action-helper.sh wait_for_prepare_pod @@ -500,12 +500,12 @@ jobs: - name: deploy cluster run: | - tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build deploy/examples/operator.yaml yq merge --inplace --arrays append tests/manifests/test-cluster-on-pvc-encrypted.yaml tests/manifests/test-kms-vault-spec-k8s-auth.yaml yq write 
-i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].count" 2 yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].volumeClaimTemplates[0].spec.resources.requests.storage" 6Gi kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml - tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build deploy/examples/toolbox.yaml - name: wait for prepare pod run: tests/scripts/github-action-helper.sh wait_for_prepare_pod @@ -554,10 +554,10 @@ jobs: - name: deploy cluster run: | - tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build deploy/examples/operator.yaml yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].encrypted" false kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml - tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build deploy/examples/toolbox.yaml - name: wait for prepare pod run: tests/scripts/github-action-helper.sh wait_for_prepare_pod @@ -603,7 +603,7 @@ jobs: - name: deploy first cluster rook run: | tests/scripts/github-action-helper.sh deploy_first_rook_cluster - cd cluster/examples/kubernetes/ceph/ + cd deploy/examples/ kubectl create -f rbdmirror.yaml -f filesystem-mirror.yaml # cephfs-mirroring is a push operation @@ -611,7 +611,7 @@ jobs: - name: deploy second cluster rook run: | tests/scripts/github-action-helper.sh deploy_second_rook_cluster - cd cluster/examples/kubernetes/ceph/ + cd deploy/examples/ sed -i 's/namespace: rook-ceph/namespace: rook-ceph-secondary/g' rbdmirror.yaml kubectl create -f rbdmirror.yaml @@ -623,7 +623,7 @@ jobs: - name: create replicated mirrored pool on cluster 1 run: | - cd cluster/examples/kubernetes/ceph/ + cd deploy/examples/ yq w -i pool-test.yaml spec.mirroring.enabled true yq w -i pool-test.yaml spec.mirroring.mode image kubectl create -f pool-test.yaml @@ -631,7 +631,7 @@ jobs: - name: create replicated mirrored pool 2 on cluster 1 run: | - cd cluster/examples/kubernetes/ceph/ + cd deploy/examples/ yq w -i pool-test.yaml metadata.name replicapool2 kubectl create -f pool-test.yaml timeout 60 sh -c 'until [ "$(kubectl -n rook-ceph get cephblockpool replicapool2 -o jsonpath='{.status.phase}'|grep -c "Ready")" -eq 1 ]; do echo "waiting for pool replicapool2 to created on cluster 2" && sleep 1; done' @@ -639,14 +639,14 @@ jobs: - name: create replicated mirrored pool on cluster 2 run: | - cd cluster/examples/kubernetes/ceph/ + cd deploy/examples/ yq w -i pool-test.yaml metadata.namespace rook-ceph-secondary kubectl create -f pool-test.yaml timeout 60 sh -c 'until [ "$(kubectl -n rook-ceph-secondary get cephblockpool replicapool -o jsonpath='{.status.phase}'|grep -c "Ready")" -eq 1 ]; do echo "waiting for pool replicapool to created on cluster 1" && sleep 1; done' - name: create replicated mirrored pool 2 on cluster 2 run: | - cd cluster/examples/kubernetes/ceph/ + cd deploy/examples/ yq w -i pool-test.yaml metadata.name replicapool2 kubectl create -f pool-test.yaml timeout 60 sh -c 'until [ "$(kubectl -n rook-ceph-secondary get cephblockpool replicapool -o 
jsonpath='{.status.phase}'|grep -c "Ready")" -eq 1 ]; do echo "waiting for pool replicapool2 to created on cluster 2" && sleep 1; done' @@ -719,15 +719,15 @@ jobs: - name: create replicated mirrored filesystem on cluster 1 run: | - PRIMARY_YAML=cluster/examples/kubernetes/ceph/filesystem-test-primary.yaml - cp cluster/examples/kubernetes/ceph/filesystem-test.yaml "$PRIMARY_YAML" + PRIMARY_YAML=deploy/examples/filesystem-test-primary.yaml + cp deploy/examples/filesystem-test.yaml "$PRIMARY_YAML" yq merge --inplace --arrays append "$PRIMARY_YAML" tests/manifests/test-fs-mirror-spec.yaml kubectl create -f "$PRIMARY_YAML" timeout 120 sh -c 'until [ "$(kubectl -n rook-ceph get cephfilesystem myfs -o jsonpath='{.status.phase}'|grep -c "Ready")" -eq 1 ]; do echo "waiting for filesystem to be created" && sleep 1; done' - name: create replicated mirrored filesystem on cluster 2 run: | - cd cluster/examples/kubernetes/ceph/ + cd deploy/examples/ yq w -i filesystem-test.yaml metadata.namespace rook-ceph-secondary yq w -i filesystem-test.yaml spec.mirroring.enabled true kubectl create -f filesystem-test.yaml diff --git a/.github/workflows/daily-nightly-jobs.yml b/.github/workflows/daily-nightly-jobs.yml index fee8a7442070..daf8e6accfb1 100644 --- a/.github/workflows/daily-nightly-jobs.yml +++ b/.github/workflows/daily-nightly-jobs.yml @@ -58,12 +58,12 @@ jobs: # Use the official build images for the nightly arm tests instead of rebuilding export USE_LOCAL_BUILD=false # removing liveness probes since the env is slow and the probe is killing the daemons - yq write -d1 -i cluster/examples/kubernetes/ceph/cluster-test.yaml "spec.healthCheck.livenessProbe.mon.disabled" true - yq write -d1 -i cluster/examples/kubernetes/ceph/cluster-test.yaml "spec.healthCheck.livenessProbe.mgr.disabled" true - yq write -d1 -i cluster/examples/kubernetes/ceph/cluster-test.yaml "spec.healthCheck.livenessProbe.osd.disabled" true + yq write -d1 -i deploy/examples/cluster-test.yaml "spec.healthCheck.livenessProbe.mon.disabled" true + yq write -d1 -i deploy/examples/cluster-test.yaml "spec.healthCheck.livenessProbe.mgr.disabled" true + yq write -d1 -i deploy/examples/cluster-test.yaml "spec.healthCheck.livenessProbe.osd.disabled" true tests/scripts/github-action-helper.sh deploy_cluster # there are no package for arm64 nfs-ganesha - kubectl delete -f cluster/examples/kubernetes/ceph/nfs-test.yaml + kubectl delete -f deploy/examples/nfs-test.yaml - name: wait for prepare pod run: timeout 900 sh -c 'until kubectl -n rook-ceph logs -f $(kubectl -n rook-ceph get pod -l app=rook-ceph-osd-prepare -o jsonpath='{.items[*].metadata.name}'); do sleep 5; done' || kubectl -n rook-ceph get all && kubectl logs -n rook-ceph deploy/rook-ceph-operator diff --git a/.github/workflows/helm-lint.yaml b/.github/workflows/helm-lint.yaml index 7ac4c8ab5623..1d2f7b81fb72 100644 --- a/.github/workflows/helm-lint.yaml +++ b/.github/workflows/helm-lint.yaml @@ -35,4 +35,4 @@ jobs: uses: helm/chart-testing-action@v2.1.0 - name: Run chart-testing (lint) - run: ct lint --charts=./cluster/charts/rook-ceph --validate-yaml=false --validate-maintainers=false + run: ct lint --charts=./deploy/charts/rook-ceph --validate-yaml=false --validate-maintainers=false diff --git a/.github/workflows/rgw-multisite-test/action.yml b/.github/workflows/rgw-multisite-test/action.yml index ff3125d7f7b8..938f26d96a9c 100644 --- a/.github/workflows/rgw-multisite-test/action.yml +++ b/.github/workflows/rgw-multisite-test/action.yml @@ -33,20 +33,20 @@ runs: shell: bash --noprofile 
--norc -eo pipefail -x {0} run: | tests/scripts/github-action-helper.sh replace_ceph_image \ - "cluster/examples/kubernetes/ceph/cluster-test.yaml" "${{ inputs.ceph-image }}" + "deploy/examples/cluster-test.yaml" "${{ inputs.ceph-image }}" - name: deploy first cluster rook shell: bash --noprofile --norc -eo pipefail -x {0} run: | tests/scripts/github-action-helper.sh deploy_first_rook_cluster - kubectl create -f cluster/examples/kubernetes/ceph/object-multisite-test.yaml + kubectl create -f deploy/examples/object-multisite-test.yaml # wait for multisite store to be created tests/scripts/github-action-helper.sh wait_for_rgw rook-ceph - name: prep second cluster pull realm config shell: bash --noprofile --norc -eo pipefail -x {0} run: | - cd cluster/examples/kubernetes/ceph/ + cd deploy/examples/ IP_ADDR=$(kubectl -n rook-ceph get svc rook-ceph-rgw-multisite-store -o jsonpath="{.spec.clusterIP}") yq w -i -d1 object-multisite-pull-realm-test.yaml spec.pull.endpoint http://${IP_ADDR}:80 BASE64_ACCESS_KEY=$(kubectl -n rook-ceph get secrets realm-a-keys -o jsonpath="{.data.access-key}") @@ -58,7 +58,7 @@ runs: shell: bash --noprofile --norc -eo pipefail -x {0} run: | tests/scripts/github-action-helper.sh deploy_second_rook_cluster - kubectl create -f cluster/examples/kubernetes/ceph/object-multisite-pull-realm-test.yaml + kubectl create -f deploy/examples/object-multisite-pull-realm-test.yaml # wait for realms to be pulled and zone-b-multisite-store to be created tests/scripts/github-action-helper.sh wait_for_rgw rook-ceph-secondary diff --git a/.github/workflows/yaml-lint.yaml b/.github/workflows/yaml-lint.yaml index 70a779f19b7a..f888bf1d26ea 100644 --- a/.github/workflows/yaml-lint.yaml +++ b/.github/workflows/yaml-lint.yaml @@ -28,4 +28,4 @@ jobs: run: pip install yamllint - name: Lint YAML files - run: yamllint -c .github/workflows/.yamllint cluster/examples/kubernetes/ --no-warnings + run: yamllint -c .github/workflows/.yamllint deploy/examples/ --no-warnings diff --git a/.gitignore b/.gitignore index 38b39cd3148c..4397d0ce16ed 100644 --- a/.gitignore +++ b/.gitignore @@ -8,5 +8,5 @@ /tests/integration/rook-test/ # OLM related stuff -cluster/olm/ceph/deploy/* -cluster/olm/ceph/templates/* +deploy/olm/deploy/* +deploy/olm/templates/* diff --git a/Documentation/admission-controller-usage.md b/Documentation/admission-controller-usage.md index 2cfbf73d4381..2c3ee79937f8 100644 --- a/Documentation/admission-controller-usage.md +++ b/Documentation/admission-controller-usage.md @@ -19,13 +19,14 @@ This script will help us achieve the following tasks 2. Creates ValidatingWebhookConfig and fills the CA bundle with the appropriate value from the cluster. Run the following commands: + ```console -kubectl create -f cluster/examples/kubernetes/ceph/crds.yaml -f cluster/examples/kubernetes/ceph/common.yaml +kubectl create -f deploy/examples/crds.yaml -f deploy/examples/common.yaml tests/scripts/deploy_admission_controller.sh ``` Now that the Secrets have been deployed, we can deploy the operator: ```console -kubectl create -f cluster/examples/kubernetes/ceph/operator.yaml +kubectl create -f deploy/examples/operator.yaml ``` At this point the operator will start the admission controller Deployment automatically and the Webhook will start intercepting requests for Rook resources. 
diff --git a/Documentation/authenticated-registry.md b/Documentation/authenticated-registry.md
index f9903e8cc96f..bf094ec115ce 100644
--- a/Documentation/authenticated-registry.md
+++ b/Documentation/authenticated-registry.md
@@ -41,8 +41,8 @@ The service accounts are:
 * `rook-ceph-mgr` (namespace: `rook-ceph`): Will affect the MGR pods in the `rook-ceph` namespace.
 * `rook-ceph-osd` (namespace: `rook-ceph`): Will affect the OSD pods in the `rook-ceph` namespace.

-You can do it either via e.g. `kubectl -n <namespace> edit serviceaccount default` or by modifying the [`operator.yaml`](https://github.com/rook/rook/blob/master/cluster/examples/kubernetes/ceph/operator.yaml)
-and [`cluster.yaml`](https://github.com/rook/rook/blob/master/cluster/examples/kubernetes/ceph/cluster.yaml) before deploying them.
+You can do it either via e.g. `kubectl -n <namespace> edit serviceaccount default` or by modifying the [`operator.yaml`](https://github.com/rook/rook/blob/master/deploy/examples/operator.yaml)
+and [`cluster.yaml`](https://github.com/rook/rook/blob/master/deploy/examples/cluster.yaml) before deploying them.

 Since it's the same procedure for all service accounts, here is just one example:
diff --git a/Documentation/ceph-advanced-configuration.md b/Documentation/ceph-advanced-configuration.md
index e54d122833ae..089a98c8b996 100644
--- a/Documentation/ceph-advanced-configuration.md
+++ b/Documentation/ceph-advanced-configuration.md
@@ -45,7 +45,7 @@ This will help you manage namespaces more easily, but you should still make sure
 configured to your liking.

 ```sh
-cd cluster/examples/kubernetes/ceph
+cd deploy/examples

 export ROOK_OPERATOR_NAMESPACE="rook-ceph"
 export ROOK_CLUSTER_NAMESPACE="rook-ceph"
@@ -68,7 +68,7 @@ kubectl apply -f common.yaml -f operator.yaml -f cluster.yaml # add other files
 If you wish to create a new CephCluster in a different namespace than `rook-ceph` while using a single operator to manage both clusters execute the following:

 ```sh
-cd cluster/examples/kubernetes/ceph
+cd deploy/examples
 NAMESPACE=rook-ceph-secondary envsubst < common-second-cluster.yaml | kubectl create -f -
 ```
diff --git a/Documentation/ceph-block.md b/Documentation/ceph-block.md
index 0bcad999fde0..2e894dc94af3 100644
--- a/Documentation/ceph-block.md
+++ b/Documentation/ceph-block.md
@@ -95,7 +95,7 @@ provisioner value should be "my-namespace.rbd.csi.ceph.com".
 Create the storage class.

 ```console
-kubectl create -f cluster/examples/kubernetes/ceph/csi/rbd/storageclass.yaml
+kubectl create -f deploy/examples/csi/rbd/storageclass.yaml
 ```

 > **NOTE**: As [specified by Kubernetes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#retain), when using the `Retain` reclaim policy, any Ceph RBD image that is backed by a `PersistentVolume` will continue to exist even after the `PersistentVolume` has been deleted. These Ceph RBD images will need to be cleaned up manually using `rbd rm`.
@@ -105,7 +105,7 @@ kubectl create -f cluster/examples/kubernetes/ceph/csi/rbd/storageclass.yaml

 We create a sample app to consume the block storage provisioned by Rook with the classic wordpress and mysql apps. Both of these apps will make use of block volumes provisioned by Rook.
-Start mysql and wordpress from the `cluster/examples/kubernetes` folder: +Start mysql and wordpress from the `deploy/examples` folder: ```console kubectl create -f mysql.yaml @@ -175,5 +175,10 @@ The OSDs must be located on different nodes, because the [`failureDomain`](ceph- ### Erasure Coded CSI Driver The erasure coded pool must be set as the `dataPool` parameter in -[`storageclass-ec.yaml`](https://github.com/rook/rook/blob/{{ branchName -}}/cluster/examples/kubernetes/ceph/csi/rbd/storage-class-ec.yaml) It is used for the data of the RBD images. +[`storageclass-ec.yaml`](https://github.com/rook/rook/blob/{{ branchName }}/deploy/examples/csi/rbd/storage-class-ec.yaml) It is used for the data of the RBD images. + +### Erasure Coded Flex Driver + +The erasure coded pool must be set as the `dataBlockPool` parameter in +[`storageclass-ec.yaml`](https://github.com/rook/rook/blob/{{ branchName }}/deploy/examples/flex/storage-class-ec.yaml). It is used for +the data of the RBD images. diff --git a/Documentation/ceph-cluster-crd.md b/Documentation/ceph-cluster-crd.md index 47f2c7dbd0ba..48708033ea40 100755 --- a/Documentation/ceph-cluster-crd.md +++ b/Documentation/ceph-cluster-crd.md @@ -541,7 +541,7 @@ A Placement configuration is specified (according to the kubernetes PodSpec) as: * `tolerations`: list of kubernetes [Toleration](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) * `topologySpreadConstraints`: kubernetes [TopologySpreadConstraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) -If you use `labelSelector` for `osd` pods, you must write two rules both for `rook-ceph-osd` and `rook-ceph-osd-prepare` like [the example configuration](https://github.com/rook/rook/blob/master/cluster/examples/kubernetes/ceph/cluster-on-pvc.yaml#L68). It comes from the design that there are these two pods for an OSD. For more detail, see the [osd design doc](https://github.com/rook/rook/blob/master/design/ceph/dedicated-osd-pod.md) and [the related issue](https://github.com/rook/rook/issues/4582). +If you use `labelSelector` for `osd` pods, you must write two rules both for `rook-ceph-osd` and `rook-ceph-osd-prepare` like [the example configuration](https://github.com/rook/rook/blob/master/deploy/examples/cluster-on-pvc.yaml#L68). It comes from the design that there are these two pods for an OSD. For more detail, see the [osd design doc](https://github.com/rook/rook/blob/master/design/ceph/dedicated-osd-pod.md) and [the related issue](https://github.com/rook/rook/issues/4582). The Rook Ceph operator creates a Job called `rook-ceph-detect-version` to detect the full Ceph version used by the given `cephVersion.image`. The placement from the `mon` section is used for the Job except for the `PodAntiAffinity` field. @@ -1279,7 +1279,7 @@ The features available from the external cluster will vary depending on the vers #### Pre-requisites In order to configure an external Ceph cluster with Rook, we need to inject some information in order to connect to that cluster. -You can use the `cluster/examples/kubernetes/ceph/import-external-cluster.sh` script to achieve that. +You can use the `deploy/examples/import-external-cluster.sh` script to achieve that. The script will look for the following populated environment variables: * `NAMESPACE`: the namespace where the configmap and secrets should be injected @@ -1308,14 +1308,14 @@ If the Ceph admin key is not provided, the following script needs to be executed On that machine, run: ```sh -. 
cluster/examples/kubernetes/ceph/create-external-cluster-resources.sh +. deploy/examples/create-external-cluster-resources.sh ``` The script will source all the necessary environment variables for you. It assumes the namespace name is `rook-ceph-external`. This can be changed by running the script like (assuming namespace name is `foo` this time): ```sh -ns=foo . cluster/examples/kubernetes/ceph/create-external-cluster-resources.sh +ns=foo . deploy/examples/create-external-cluster-resources.sh ``` When done you can execute: `import-external-cluster.sh` to inject them in your Kubernetes cluster. @@ -1342,12 +1342,12 @@ In this example, you can simply export RGW_POOL_PREFIX before executing the scri export RGW_POOL_PREFIX=my-store ``` -The script will automatically create users and keys with the lowest possible privileges and populate the necessary environment variables for `cluster/examples/kubernetes/ceph/import-external-cluster.sh` to work correctly. +The script will automatically create users and keys with the lowest possible privileges and populate the necessary environment variables for `deploy/examples/import-external-cluster.sh` to work correctly. Finally, you can simply execute the script like this from a machine that has access to your Kubernetes cluster: ```console -bash cluster/examples/kubernetes/ceph/import-external-cluster.sh +bash deploy/examples/import-external-cluster.sh ``` #### CephCluster example (consumer) @@ -1380,7 +1380,7 @@ Additionally, you now need to inject `common-external.yaml` too. You can now create it like this: ```console -kubectl create -f cluster/examples/kubernetes/ceph/cluster-external.yaml +kubectl create -f deploy/examples/cluster-external.yaml ``` If the previous section has not been completed, the Rook Operator will still acknowledge the CR creation but will wait forever to receive connection information. diff --git a/Documentation/ceph-csi-drivers.md b/Documentation/ceph-csi-drivers.md index cf6572b36ce2..5e8ff8871623 100644 --- a/Documentation/ceph-csi-drivers.md +++ b/Documentation/ceph-csi-drivers.md @@ -122,7 +122,7 @@ provisioned and used by the pod with its lifecycle. The volumes are provisioned when pod get spawned and destroyed at time of pod delete. Refer to [ephemeral-doc](https://kubernetes.io/docs/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes) for more info. -Also, See the example manifests for an [RBD ephemeral volume](https://github.com/rook/rook/tree/{{ branchName }}/cluster/examples/kubernetes/ceph/csi/rbd/pod-ephemeral.yaml) and a [CephFS ephemeral volume](https://github.com/rook/rook/tree/{{ branchName }}/cluster/examples/kubernetes/ceph/csi/cephfs/pod-ephemeral.yaml). +Also, See the example manifests for an [RBD ephemeral volume](https://github.com/rook/rook/tree/{{ branchName }}/deploy/examples/csi/rbd/pod-ephemeral.yaml) and a [CephFS ephemeral volume](https://github.com/rook/rook/tree/{{ branchName }}/deploy/examples/csi/cephfs/pod-ephemeral.yaml). ### Prerequisites Kubernetes version 1.21 or greater is required. 
diff --git a/Documentation/ceph-csi-snapshot.md b/Documentation/ceph-csi-snapshot.md index 08809deed8bb..5253168c3424 100644 --- a/Documentation/ceph-csi-snapshot.md +++ b/Documentation/ceph-csi-snapshot.md @@ -26,12 +26,11 @@ In short, as the documentation describes it: If your Kubernetes version is updated to a newer version of the snapshot API, follow the upgrade guide [here](https://github.com/kubernetes-csi/external-snapshotter/tree/v4.0.0#upgrade) to upgrade from v1alpha1 to v1beta1, or v1beta1 to v1. - ## RBD Snapshots ### VolumeSnapshotClass -In [VolumeSnapshotClass](https://github.com/rook/rook/tree/{{ branchName }}/cluster/examples/kubernetes/ceph/csi/rbd/snapshotclass.yaml), +In [VolumeSnapshotClass](https://github.com/rook/rook/tree/{{ branchName }}/deploy/examples/csi/rbd/snapshotclass.yaml), the `csi.storage.k8s.io/snapshotter-secret-name` parameter should reference the name of the secret created for the rbdplugin and `pool` to reflect the Ceph pool name. @@ -41,18 +40,18 @@ maintain a configmap whose contents will match this key. By default this is "rook-ceph". ```console -kubectl create -f cluster/examples/kubernetes/ceph/csi/rbd/snapshotclass.yaml +kubectl create -f deploy/examples/csi/rbd/snapshotclass.yaml ``` ### Volumesnapshot -In [snapshot](https://github.com/rook/rook/tree/{{ branchName }}/cluster/examples/kubernetes/ceph/csi/rbd/snapshot.yaml), +In [snapshot](https://github.com/rook/rook/tree/{{ branchName }}/deploy/examples/csi/rbd/snapshot.yaml), `volumeSnapshotClassName` should be the name of the `VolumeSnapshotClass` previously created. The `persistentVolumeClaimName` should be the name of the PVC which is already created by the RBD CSI driver. ```console -kubectl create -f cluster/examples/kubernetes/ceph/csi/rbd/snapshot.yaml +kubectl create -f deploy/examples/csi/rbd/snapshot.yaml ``` ### Verify RBD Snapshot Creation @@ -81,14 +80,14 @@ The snapshot will be ready to restore to a new PVC when the `READYTOUSE` field o ### Restore the snapshot to a new PVC In -[pvc-restore](https://github.com/rook/rook/tree/{{ branchName }}/cluster/examples/kubernetes/ceph/csi/rbd/pvc-restore.yaml), +[pvc-restore](https://github.com/rook/rook/tree/{{ branchName }}/deploy/examples/csi/rbd/pvc-restore.yaml), `dataSource` should be the name of the `VolumeSnapshot` previously created. The `dataSource` kind should be the `VolumeSnapshot`. 
Create a new PVC from the snapshot ```console -kubectl create -f cluster/examples/kubernetes/ceph/csi/rbd/pvc-restore.yaml +kubectl create -f deploy/examples/csi/rbd/pvc-restore.yaml ``` ### Verify RBD Clone PVC Creation @@ -107,16 +106,16 @@ kubectl get pvc To clean your cluster of the resources created by this example, run the following: ```console -kubectl delete -f cluster/examples/kubernetes/ceph/csi/rbd/pvc-restore.yaml -kubectl delete -f cluster/examples/kubernetes/ceph/csi/rbd/snapshot.yaml -kubectl delete -f cluster/examples/kubernetes/ceph/csi/rbd/snapshotclass.yaml +kubectl delete -f deploy/examples/csi/rbd/pvc-restore.yaml +kubectl delete -f deploy/examples/csi/rbd/snapshot.yaml +kubectl delete -f deploy/examples/csi/rbd/snapshotclass.yaml ``` ## CephFS Snapshots ### VolumeSnapshotClass -In [VolumeSnapshotClass](https://github.com/rook/rook/tree/{{ branchName }}/cluster/examples/kubernetes/ceph/csi/cephfs/snapshotclass.yaml), +In [VolumeSnapshotClass](https://github.com/rook/rook/tree/{{ branchName }}/deploy/examples/csi/cephfs/snapshotclass.yaml), the `csi.storage.k8s.io/snapshotter-secret-name` parameter should reference the name of the secret created for the cephfsplugin. @@ -127,18 +126,18 @@ maintain a configmap whose contents will match this key. By default this is ```console -kubectl create -f cluster/examples/kubernetes/ceph/csi/cephfs/snapshotclass.yaml +kubectl create -f deploy/examples/csi/cephfs/snapshotclass.yaml ``` ### Volumesnapshot -In [snapshot](https://github.com/rook/rook/tree/{{ branchName }}/cluster/examples/kubernetes/ceph/csi/cephfs/snapshot.yaml), +In [snapshot](https://github.com/rook/rook/tree/{{ branchName }}/deploy/examples/csi/cephfs/snapshot.yaml), `volumeSnapshotClassName` should be the name of the `VolumeSnapshotClass` previously created. The `persistentVolumeClaimName` should be the name of the PVC which is already created by the CephFS CSI driver. ```console -kubectl create -f cluster/examples/kubernetes/ceph/csi/cephfs/snapshot.yaml +kubectl create -f deploy/examples/csi/cephfs/snapshot.yaml ``` ### Verify CephFS Snapshot Creation @@ -165,14 +164,14 @@ The snapshot will be ready to restore to a new PVC when `READYTOUSE` field of th ### Restore the snapshot to a new PVC In -[pvc-restore](https://github.com/rook/rook/tree/{{ branchName }}/cluster/examples/kubernetes/ceph/csi/cephfs/pvc-restore.yaml), +[pvc-restore](https://github.com/rook/rook/tree/{{ branchName }}/deploy/examples/csi/cephfs/pvc-restore.yaml), `dataSource` should be the name of the `VolumeSnapshot` previously created. The `dataSource` kind should be the `VolumeSnapshot`. 
Create a new PVC from the snapshot ```console -kubectl create -f cluster/examples/kubernetes/ceph/csi/cephfs/pvc-restore.yaml +kubectl create -f deploy/examples/csi/cephfs/pvc-restore.yaml ``` ### Verify CephFS Restore PVC Creation @@ -192,9 +191,9 @@ kubectl get pvc To clean your cluster of the resources created by this example, run the following: ```console -kubectl delete -f cluster/examples/kubernetes/ceph/csi/cephfs/pvc-restore.yaml -kubectl delete -f cluster/examples/kubernetes/ceph/csi/cephfs/snapshot.yaml -kubectl delete -f cluster/examples/kubernetes/ceph/csi/cephfs/snapshotclass.yaml +kubectl delete -f deploy/examples/csi/cephfs/pvc-restore.yaml +kubectl delete -f deploy/examples/csi/cephfs/snapshot.yaml +kubectl delete -f deploy/examples/csi/cephfs/snapshotclass.yaml ``` ## Limitations diff --git a/Documentation/ceph-csi-volume-clone.md b/Documentation/ceph-csi-volume-clone.md index 73585634b0b1..6aff19791768 100644 --- a/Documentation/ceph-csi-volume-clone.md +++ b/Documentation/ceph-csi-volume-clone.md @@ -25,7 +25,7 @@ for more info. ### Volume Cloning In -[pvc-clone](https://github.com/rook/rook/tree/{{ branchName }}/cluster/examples/kubernetes/ceph/csi/rbd/pvc-clone.yaml), +[pvc-clone](https://github.com/rook/rook/tree/{{ branchName }}/deploy/examples/csi/rbd/pvc-clone.yaml), `dataSource` should be the name of the `PVC` which is already created by RBD CSI driver. The `dataSource` kind should be the `PersistentVolumeClaim` and also storageclass should be same as the source `PVC`. @@ -33,7 +33,7 @@ should be same as the source `PVC`. Create a new PVC Clone from the PVC ```console -kubectl create -f cluster/examples/kubernetes/ceph/csi/rbd/pvc-clone.yaml +kubectl create -f deploy/examples/csi/rbd/pvc-clone.yaml ``` ### Verify RBD volume Clone PVC Creation @@ -53,7 +53,7 @@ kubectl get pvc To clean your cluster of the resources created by this example, run the following: ```console -kubectl delete -f cluster/examples/kubernetes/ceph/csi/rbd/pvc-clone.yaml +kubectl delete -f deploy/examples/csi/rbd/pvc-clone.yaml ``` ## CephFS Volume Cloning @@ -66,7 +66,7 @@ kubectl delete -f cluster/examples/kubernetes/ceph/csi/rbd/pvc-clone.yaml ### Volume Cloning In -[pvc-clone](https://github.com/rook/rook/tree/{{ branchName }}/cluster/examples/kubernetes/ceph/csi/cephfs/pvc-clone.yaml), +[pvc-clone](https://github.com/rook/rook/tree/{{ branchName }}/deploy/examples/csi/cephfs/pvc-clone.yaml), `dataSource` should be the name of the `PVC` which is already created by CephFS CSI driver. The `dataSource` kind should be the `PersistentVolumeClaim` and also storageclass should be same as the source `PVC`. @@ -74,7 +74,7 @@ should be same as the source `PVC`. 
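A clone differs from a snapshot restore only in its `dataSource`: it points at an existing PVC rather than a VolumeSnapshot, and the core API group means `apiGroup` is omitted. A minimal sketch with assumed names:

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rbd-pvc-clone                  # assumed name for the clone
spec:
  storageClassName: rook-ceph-block    # must be the same class as the source PVC
  dataSource:
    name: rbd-pvc                      # assumed: the existing source PVC to clone
    kind: PersistentVolumeClaim
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
```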
Create a new PVC Clone from the PVC ```console -kubectl create -f cluster/examples/kubernetes/ceph/csi/cephfs/pvc-clone.yaml +kubectl create -f deploy/examples/csi/cephfs/pvc-clone.yaml ``` ### Verify CephFS volume Clone PVC Creation @@ -94,5 +94,5 @@ kubectl get pvc To clean your cluster of the resources created by this example, run the following: ```console -kubectl delete -f cluster/examples/kubernetes/ceph/csi/cephfs/pvc-clone.yaml +kubectl delete -f deploy/examples/csi/cephfs/pvc-clone.yaml ``` diff --git a/Documentation/ceph-examples.md b/Documentation/ceph-examples.md index b80c10a9f948..cc98a03c98f1 100644 --- a/Documentation/ceph-examples.md +++ b/Documentation/ceph-examples.md @@ -9,13 +9,13 @@ indent: true Configuration for Rook and Ceph can be configured in multiple ways to provide block devices, shared filesystem volumes or object storage in a kubernetes namespace. We have provided several examples to simplify storage setup, but remember there are many tunables and you will need to decide what settings work for your use case and environment. -See the **[example yaml files](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph)** folder for all the rook/ceph setup example spec files. +See the **[example yaml files](https://github.com/rook/rook/blob/{{ branchName }}/deploy/examples)** folder for all the rook/ceph setup example spec files. ## Common Resources The first step to deploy Rook is to create the CRDs and other common resources. The configuration for these resources will be the same for most deployments. -The [crds.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/crds.yaml) and -[common.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/common.yaml) sets these resources up. +The [crds.yaml](https://github.com/rook/rook/blob/{{ branchName }}/deploy/examples/crds.yaml) and +[common.yaml](https://github.com/rook/rook/blob/{{ branchName }}/deploy/examples/common.yaml) set these resources up. ```console kubectl create -f crds.yaml -f common.yaml @@ -25,14 +25,14 @@ The examples all assume the operator and all Ceph daemons will be started in the ## Operator -After the common resources are created, the next step is to create the Operator deployment. Several spec file examples are provided in [this directory](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/): +After the common resources are created, the next step is to create the Operator deployment. Several spec file examples are provided in [this directory](https://github.com/rook/rook/blob/{{ branchName }}/deploy/examples/): * `operator.yaml`: The most common settings for production deployments * `kubectl create -f operator.yaml` * `operator-openshift.yaml`: Includes all of the operator settings for running a basic Rook cluster in an OpenShift environment. You will also want to review the [OpenShift Prerequisites](ceph-openshift.md) to confirm the settings. * `oc create -f operator-openshift.yaml` -Settings for the operator are configured through environment variables on the operator deployment. The individual settings are documented in [operator.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/operator.yaml). +Settings for the operator are configured through environment variables on the operator deployment. 
The individual settings are documented in [operator.yaml](https://github.com/rook/rook/blob/{{ branchName }}/deploy/examples/operator.yaml). ## Cluster CRD @@ -97,13 +97,13 @@ See the [Object Store CRD](ceph-object-store-crd.md) topic for more details on t ### Object Storage User -* `object-user.yaml`: Creates a simple object storage user and generates credentials for the S3 API +- `object-user.yaml`: Creates a simple object storage user and generates credentials for the S3 API ### Object Storage Buckets The Ceph operator also runs an object store bucket provisioner which can grant access to existing buckets or dynamically provision new buckets. -* [object-bucket-claim-retain.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/object-bucket-claim-retain.yaml) Creates a request for a new bucket by referencing a StorageClass which saves the bucket when the initiating OBC is deleted. -* [object-bucket-claim-delete.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/object-bucket-claim-delete.yaml) Creates a request for a new bucket by referencing a StorageClass which deletes the bucket when the initiating OBC is deleted. -* [storageclass-bucket-retain.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/storageclass-bucket-retain.yaml) Creates a new StorageClass which defines the Ceph Object Store, a region, and retains the bucket after the initiating OBC is deleted. -* [storageclass-bucket-delete.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/storageclass-bucket-delete.yaml) Creates a new StorageClass which defines the Ceph Object Store, a region, and deletes the bucket after the initiating OBC is deleted. +* [object-bucket-claim-retain.yaml](https://github.com/rook/rook/blob/{{ branchName }}/deploy/examples/object-bucket-claim-retain.yaml) Creates a request for a new bucket by referencing a StorageClass which saves the bucket when the initiating OBC is deleted. +* [object-bucket-claim-delete.yaml](https://github.com/rook/rook/blob/{{ branchName }}/deploy/examples/object-bucket-claim-delete.yaml) Creates a request for a new bucket by referencing a StorageClass which deletes the bucket when the initiating OBC is deleted. +* [storageclass-bucket-retain.yaml](https://github.com/rook/rook/blob/{{ branchName }}/deploy/examples/storageclass-bucket-retain.yaml) Creates a new StorageClass which defines the Ceph Object Store, a region, and retains the bucket after the initiating OBC is deleted. +* [storageclass-bucket-delete.yaml](https://github.com/rook/rook/blob/{{ branchName }}/deploy/examples/storageclass-bucket-delete.yaml) Creates a new StorageClass which defines the Ceph Object Store, a region, and deletes the bucket after the initiating OBC is deleted. 
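To make the claim/class pairing concrete, a bucket request generally looks like the following sketch; the names are assumptions, and the storage class would be one created by the `storageclass-bucket-*.yaml` examples above:

```yaml
apiVersion: objectbucket.io/v1alpha1
kind: ObjectBucketClaim
metadata:
  name: ceph-bucket                           # assumed claim name
spec:
  generateBucketName: ceph-bkt                # prefix for the dynamically provisioned bucket name
  storageClassName: rook-ceph-delete-bucket   # assumed: a class with a Delete reclaim policy
```

When the claim is bound, the provisioner exposes the bucket endpoint and credentials in a ConfigMap and Secret of the same name for the application to consume.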
diff --git a/Documentation/ceph-filesystem-crd.md b/Documentation/ceph-filesystem-crd.md index 4b2b533cc05d..f0a6a6d60ca3 100644 --- a/Documentation/ceph-filesystem-crd.md +++ b/Documentation/ceph-filesystem-crd.md @@ -65,7 +65,7 @@ spec: # memory: "1024Mi" ``` -(These definitions can also be found in the [`filesystem.yaml`](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/filesystem.yaml) file) +(These definitions can also be found in the [`filesystem.yaml`](https://github.com/rook/rook/blob/{{ branchName }}/deploy/examples/filesystem.yaml) file) ### Erasure Coded @@ -94,8 +94,8 @@ spec: activeStandby: true ``` -(These definitions can also be found in the [`filesystem-ec.yaml`](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/filesystem-ec.yaml) file. -Also see an example in the [`storageclass-ec.yaml`](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/csi/cephfs/storageclass-ec.yaml) for how to configure the volume.) +(These definitions can also be found in the [`filesystem-ec.yaml`](https://github.com/rook/rook/blob/{{ branchName }}/deploy/examples/filesystem-ec.yaml) file. +Also see an example in the [`storageclass-ec.yaml`](https://github.com/rook/rook/blob/{{ branchName }}/deploy/examples/csi/cephfs/storageclass-ec.yaml) for how to configure the volume.) ### Mirroring @@ -211,7 +211,7 @@ The metadata server settings correspond to the MDS daemon settings. * `duration`: * `annotations`: Key value pair list of annotations to add. * `labels`: Key value pair list of labels to add. -* `placement`: The mds pods can be given standard Kubernetes placement restrictions with `nodeAffinity`, `tolerations`, `podAffinity`, and `podAntiAffinity` similar to placement defined for daemons configured by the [cluster CRD](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/cluster.yaml). +* `placement`: The mds pods can be given standard Kubernetes placement restrictions with `nodeAffinity`, `tolerations`, `podAffinity`, and `podAntiAffinity` similar to placement defined for daemons configured by the [cluster CRD](https://github.com/rook/rook/blob/{{ branchName }}/deploy/examples/cluster.yaml). * `resources`: Set resource requests/limits for the Filesystem MDS Pod(s), see [MDS Resources Configuration Settings](#mds-resources-configuration-settings) * `priorityClassName`: Set priority class name for the Filesystem MDS Pod(s) diff --git a/Documentation/ceph-filesystem.md b/Documentation/ceph-filesystem.md index 30f1fdce6a40..59af1a91c1fc 100644 --- a/Documentation/ceph-filesystem.md +++ b/Documentation/ceph-filesystem.md @@ -120,7 +120,7 @@ provisioner value should be "rook-op.rbd.csi.ceph.com". Create the storage class. ```console -kubectl create -f cluster/examples/kubernetes/ceph/csi/cephfs/storageclass.yaml +kubectl create -f deploy/examples/csi/cephfs/storageclass.yaml ``` ## Quotas @@ -213,7 +213,7 @@ spec: Create the Kube registry deployment: ```console -kubectl create -f cluster/examples/kubernetes/ceph/csi/cephfs/kube-registry.yaml +kubectl create -f deploy/examples/csi/cephfs/kube-registry.yaml ``` You now have a docker registry which is HA with persistent storage. 
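What makes the registry highly available is simply that every replica mounts the same ReadWriteMany CephFS volume. A sketch of such a shared claim, with assumed names:

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cephfs-pvc                  # assumed: the claim shared by all registry replicas
  namespace: kube-system
spec:
  storageClassName: rook-cephfs     # the CephFS storage class created above
  accessModes:
    - ReadWriteMany                 # shared read-write access across replicas
  resources:
    requests:
      storage: 1Gi
```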
diff --git a/Documentation/ceph-fs-mirror-crd.md b/Documentation/ceph-fs-mirror-crd.md index 4d1c13cf47ff..7e0bda0617be 100644 --- a/Documentation/ceph-fs-mirror-crd.md +++ b/Documentation/ceph-fs-mirror-crd.md @@ -38,7 +38,7 @@ If any setting is unspecified, a suitable default will be used automatically. ### FilesystemMirror Settings -- `placement`: The cephfs-mirror pods can be given standard Kubernetes placement restrictions with `nodeAffinity`, `tolerations`, `podAffinity`, and `podAntiAffinity` similar to placement defined for daemons configured by the [cluster CRD](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/cluster.yaml). +- `placement`: The cephfs-mirror pods can be given standard Kubernetes placement restrictions with `nodeAffinity`, `tolerations`, `podAffinity`, and `podAntiAffinity` similar to placement defined for daemons configured by the [cluster CRD](https://github.com/rook/rook/blob/{{ branchName }}/deploy/examples/cluster.yaml). - `annotations`: Key value pair list of annotations to add. - `labels`: Key value pair list of labels to add. - `resources`: The resource requirements for the cephfs-mirror pods. diff --git a/Documentation/ceph-monitoring.md b/Documentation/ceph-monitoring.md index 4eb9b4846ad0..cdb332efbda5 100644 --- a/Documentation/ceph-monitoring.md +++ b/Documentation/ceph-monitoring.md @@ -39,7 +39,7 @@ From the root of your locally cloned Rook repo, go the monitoring directory: ```console $ git clone --single-branch --branch {{ branchName }} https://github.com/rook/rook.git -cd rook/cluster/examples/kubernetes/ceph/monitoring +cd rook/deploy/examples/monitoring ``` Create the service monitor as well as the Prometheus server pod and service: @@ -100,7 +100,7 @@ To enable the Ceph Prometheus alerts follow these steps: 1. Create the RBAC rules to enable monitoring. ```console -kubectl create -f cluster/examples/kubernetes/ceph/monitoring/rbac.yaml +kubectl create -f deploy/examples/monitoring/rbac.yaml ``` 2. Make following changes to your CephCluster object (e.g., `cluster.yaml`). @@ -151,7 +151,7 @@ with each update or upgrade. This should be done at the same time you update Roo like `common.yaml`. ```console -kubectl apply -f cluster/examples/kubernetes/ceph/monitoring/rbac.yaml +kubectl apply -f deploy/examples/monitoring/rbac.yaml ``` > This is updated automatically if you are upgrading via the helm chart diff --git a/Documentation/ceph-object-multisite.md b/Documentation/ceph-object-multisite.md index db9061b0ab77..80daa8e2dbca 100644 --- a/Documentation/ceph-object-multisite.md +++ b/Documentation/ceph-object-multisite.md @@ -33,7 +33,8 @@ If an admin wants to set up multisite on a Rook Ceph cluster, the admin should c 1. A [zone](/Documentation/ceph-object-multisite-crd.md#object-zone-settings) 1. An [object-store](/Documentation/ceph-object-store-crd.md#zone-settings) with the `zone` section -object-multisite.yaml in the [examples](/cluster/examples/kubernetes/ceph/) directory can be used to create the multisite CRDs. +object-multisite.yaml in the [examples](/deploy/examples/) directory can be used to create the multisite CRDs. + ```console kubectl create -f object-multisite.yaml ``` @@ -169,7 +170,8 @@ Once the admin knows the endpoint and the secret for the keys has been created, 1. A [CephObjectZone](/design/ceph/object/zone.md) referring to the CephObjectZoneGroup created above. 1. A [CephObjectStore](/design/ceph/object/store.md) referring to the new CephObjectZone resource. 
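As a rough sketch of step 1, the pulled realm declares the remote endpoint in its `pull` section; the realm name and endpoint below are assumptions:

```yaml
apiVersion: ceph.rook.io/v1
kind: CephObjectRealm
metadata:
  name: realm-a                      # assumed: must match the realm name on the primary cluster
  namespace: rook-ceph
spec:
  pull:
    endpoint: http://10.2.1.164:80   # assumed: an endpoint of the primary cluster's master zone
```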
-object-multisite-pull-realm.yaml (with changes) in the [examples](/cluster/examples/kubernetes/ceph/) directory can be used to create the multisite CRDs. +object-multisite-pull-realm.yaml (with changes) in the [examples](/deploy/examples/) directory can be used to create the multisite CRDs. + ```console kubectl create -f object-multisite-pull-realm.yaml ``` diff --git a/Documentation/ceph-openshift-issues.md b/Documentation/ceph-openshift-issues.md index d637e1b63e01..a51316837116 100644 --- a/Documentation/ceph-openshift-issues.md +++ b/Documentation/ceph-openshift-issues.md @@ -12,7 +12,7 @@ OpenShift Console uses OpenShift Prometheus for monitoring and populating data i 1. Change the monitoring namespace to `openshift-monitoring` - Change the namespace of the RoleBinding `rook-ceph-metrics` from `rook-ceph` to `openshift-monitoring` for the `prometheus-k8s` ServiceAccount in [rbac.yaml](https://github.com/rook/rook/blob/master/cluster/examples/kubernetes/ceph/monitoring/rbac.yaml#L70). + Change the namespace of the RoleBinding `rook-ceph-metrics` from `rook-ceph` to `openshift-monitoring` for the `prometheus-k8s` ServiceAccount in [rbac.yaml](https://github.com/rook/rook/blob/master/deploy/examples/monitoring/rbac.yaml#L70). ``` subjects: diff --git a/Documentation/ceph-openshift.md b/Documentation/ceph-openshift.md index 3864fc43334e..378c7a3a23b3 100644 --- a/Documentation/ceph-openshift.md +++ b/Documentation/ceph-openshift.md @@ -9,7 +9,7 @@ indent: true [OpenShift](https://www.openshift.com/) adds a number of security and other enhancements to Kubernetes. In particular, [security context constraints](https://blog.openshift.com/understanding-service-accounts-sccs/) allow the cluster admin to define exactly which permissions are allowed to pods running in the cluster. You will need to define those permissions that allow the Rook pods to run. -The settings for Rook in OpenShift are described below, and are also included in the [example yaml files](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph): +The settings for Rook in OpenShift are described below, and are also included in the [example yaml files](https://github.com/rook/rook/blob/{{ branchName }}/deploy/examples): * `operator-openshift.yaml`: Creates the security context constraints and starts the operator deployment * `object-openshift.yaml`: Creates an object store with rgw listening on a valid port number for OpenShift @@ -35,7 +35,7 @@ To orchestrate the storage platform, Rook requires the following access in the c ## Security Context Constraints -Before starting the Rook operator or cluster, create the security context constraints needed by the Rook pods. The following yaml is found in `operator-openshift.yaml` under `/cluster/examples/kubernetes/ceph`. +Before starting the Rook operator or cluster, create the security context constraints needed by the Rook pods. The following yaml is found in `operator-openshift.yaml` under `/deploy/examples`. > **NOTE**: Older versions of OpenShift may require `apiVersion: v1`. diff --git a/Documentation/ceph-osd-mgmt.md b/Documentation/ceph-osd-mgmt.md index dbe46ae572a2..133eb96e84f7 100644 --- a/Documentation/ceph-osd-mgmt.md +++ b/Documentation/ceph-osd-mgmt.md @@ -53,6 +53,7 @@ to the updated cluster CR. 
To remove an OSD due to a failed disk or other re-configuration, consider the following to ensure the health of the data through the removal process: + - Confirm you will have enough space on your cluster after removing your OSDs to properly handle the deletion - Confirm the remaining OSDs and their placement groups (PGs) are healthy in order to handle the rebalancing of the data - Do not remove too many OSDs at once @@ -106,7 +107,7 @@ in the toolbox may show which OSD is `down`. If you want to remove a healthy OSD ### Purge the OSD from the Ceph cluster -OSD removal can be automated with the example found in the [rook-ceph-purge-osd job](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/osd-purge.yaml). +OSD removal can be automated with the example found in the [rook-ceph-purge-osd job](https://github.com/rook/rook/blob/{{ branchName }}/deploy/examples/osd-purge.yaml). In the osd-purge.yaml, change the `` to the ID(s) of the OSDs you want to remove. 1. Run the job: `kubectl create -f osd-purge.yaml` diff --git a/Documentation/ceph-rbd-mirror-crd.md b/Documentation/ceph-rbd-mirror-crd.md index e768ecf1cde1..d1a67803f8f0 100644 --- a/Documentation/ceph-rbd-mirror-crd.md +++ b/Documentation/ceph-rbd-mirror-crd.md @@ -41,7 +41,7 @@ If any setting is unspecified, a suitable default will be used automatically. ### RBDMirror Settings * `count`: The number of rbd mirror instance to run. -* `placement`: The rbd mirror pods can be given standard Kubernetes placement restrictions with `nodeAffinity`, `tolerations`, `podAffinity`, and `podAntiAffinity` similar to placement defined for daemons configured by the [cluster CRD](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/cluster.yaml). +* `placement`: The rbd mirror pods can be given standard Kubernetes placement restrictions with `nodeAffinity`, `tolerations`, `podAffinity`, and `podAntiAffinity` similar to placement defined for daemons configured by the [cluster CRD](https://github.com/rook/rook/blob/{{ branchName }}/deploy/examples/cluster.yaml). * `annotations`: Key value pair list of annotations to add. * `labels`: Key value pair list of labels to add. * `resources`: The resource requirements for the rbd mirror pods. diff --git a/Documentation/ceph-toolbox.md b/Documentation/ceph-toolbox.md index a9dd01185e61..c360331aeaa3 100644 --- a/Documentation/ceph-toolbox.md +++ b/Documentation/ceph-toolbox.md @@ -23,7 +23,7 @@ run arbitrary Ceph commands. Launch the rook-ceph-tools pod: ```console -kubectl create -f cluster/examples/kubernetes/ceph/toolbox.yaml +kubectl create -f deploy/examples/toolbox.yaml ``` Wait for the toolbox pod to download its container and get to the `running` state: @@ -63,7 +63,7 @@ In this example, the `ceph status` command is executed when the job is created. Create the toolbox job: ```console -kubectl create -f cluster/examples/kubernetes/ceph/toolbox-job.yaml +kubectl create -f deploy/examples/toolbox-job.yaml ``` After the job completes, see the results of the script: diff --git a/Documentation/ceph-upgrade.md b/Documentation/ceph-upgrade.md index ec94245c3469..db7faf4ac282 100644 --- a/Documentation/ceph-upgrade.md +++ b/Documentation/ceph-upgrade.md @@ -59,7 +59,7 @@ the following: First get the latest common resources manifests that contain the latest changes for Rook v1.7. 
```sh git clone --single-branch --depth=1 --branch v1.7.1 https://github.com/rook/rook.git -cd rook/cluster/examples/kubernetes/ceph +cd rook/deploy/examples ``` If you have deployed the Rook Operator or the Ceph cluster into a different namespace than @@ -103,7 +103,7 @@ time without compatibility support and without prior notice. We will do all our work in the Ceph example manifests directory. ```sh -$ cd $YOUR_ROOK_REPO/cluster/examples/kubernetes/ceph/ +$ cd $YOUR_ROOK_REPO/deploy/examples/ ``` Unless your Rook cluster was created with customized namespaces, namespaces for Rook clusters are @@ -275,7 +275,7 @@ needed by the Operator. Also update the Custom Resource Definitions (CRDs). Get the latest common resources manifests that contain the latest changes. ```sh git clone --single-branch --depth=1 --branch v1.7.0 https://github.com/rook/rook.git -cd rook/cluster/examples/kubernetes/ceph +cd rook/deploy/examples ``` If you have deployed the Rook Operator or the Ceph cluster into a different namespace than @@ -299,7 +299,7 @@ If you have [Prometheus monitoring](ceph-monitoring.md) enabled, follow the step to upgrade the Prometheus RBAC resources as well. ```sh -kubectl apply -f cluster/examples/kubernetes/ceph/monitoring/rbac.yaml +kubectl apply -f deploy/examples/monitoring/rbac.yaml ``` ### **2. Update Ceph CSI versions** diff --git a/Documentation/direct-tools.md b/Documentation/direct-tools.md index 62aa95cc42f0..f3478c976702 100644 --- a/Documentation/direct-tools.md +++ b/Documentation/direct-tools.md @@ -16,7 +16,7 @@ If your pod dies, your mount will die with it. To test mounting your Ceph volumes, start a pod with the necessary mounts. An example is provided in the examples test directory: ```console -kubectl create -f cluster/examples/kubernetes/ceph/direct-mount.yaml +kubectl create -f deploy/examples/direct-mount.yaml ``` After the pod is started, connect to it like this: diff --git a/Documentation/helm-ceph-cluster.md b/Documentation/helm-ceph-cluster.md index ff37077489be..46a2fb8a793f 100644 --- a/Documentation/helm-ceph-cluster.md +++ b/Documentation/helm-ceph-cluster.md @@ -33,7 +33,7 @@ Rook currently publishes builds of this chart to the `release` and `master` chan **Before installing, review the values.yaml to confirm if the default settings need to be updated.** * If the operator was installed in a namespace other than `rook-ceph`, the namespace must be set in the `operatorNamespace` variable. -* Set the desired settings in the `cephClusterSpec`. The [defaults](https://github.com/rook/rook/tree/{{ branchName }}/cluster/charts/rook-ceph-cluster/values.yaml) +* Set the desired settings in the `cephClusterSpec`. The [defaults](https://github.com/rook/rook/tree/{{ branchName }}/deploy/charts/rook-ceph-cluster/values.yaml) are only an example and not likely to apply to your cluster. * The `monitoring` section should be removed from the `cephClusterSpec`, as it is specified separately in the helm settings. * The default values for `cephBlockPools`, `cephFileSystems`, and `CephObjectStores` will create one of each, and their corresponding storage classes. @@ -143,7 +143,7 @@ chart to start managing the cluster: To deploy from a local build from your development environment: ```console -cd cluster/charts/rook-ceph-cluster +cd deploy/charts/rook-ceph-cluster helm install --create-namespace --namespace rook-ceph rook-ceph-cluster -f values-override.yaml . 
``` diff --git a/Documentation/helm-operator.md b/Documentation/helm-operator.md index 9ae2a5879695..1febdd809b05 100644 --- a/Documentation/helm-operator.md +++ b/Documentation/helm-operator.md @@ -49,7 +49,7 @@ To deploy from a local build from your development environment: 1. Install the helm chart: ```console -cd cluster/charts/rook-ceph +cd deploy/charts/rook-ceph helm install --create-namespace --namespace rook-ceph rook-ceph . ``` @@ -163,4 +163,4 @@ Alternatively, a yaml file that specifies the values for the above parameters (` helm install --namespace rook-ceph rook-ceph rook-release/rook-ceph -f values.yaml ``` -For example settings, see [values.yaml](https://github.com/rook/rook/tree/{{ branchName }}/cluster/charts/rook-ceph/values.yaml) +For example settings, see [values.yaml](https://github.com/rook/rook/tree/{{ branchName }}/deploy/charts/rook-ceph/values.yaml) diff --git a/Documentation/helm.md b/Documentation/helm.md index 4b919d77836e..f71f8e3a2052 100644 --- a/Documentation/helm.md +++ b/Documentation/helm.md @@ -14,5 +14,5 @@ Rook has published the following Helm charts for the Ceph storage provider: The Helm charts are intended to simplify deployment and upgrades. Configuring the Rook resources without Helm is also fully supported by creating the -[manifests](https://github.com/rook/rook/tree/{{ branchName }}/cluster/examples/kubernetes) +[manifests](https://github.com/rook/rook/tree/{{ branchName }}/deploy/examples) directly. diff --git a/Documentation/pod-security-policies.md b/Documentation/pod-security-policies.md index 9062ab2a2a34..8feb1ba78f1b 100644 --- a/Documentation/pod-security-policies.md +++ b/Documentation/pod-security-policies.md @@ -12,7 +12,7 @@ please review this document. By default, Kubernetes clusters do not have PSPs en be able to skip this document. If you are configuring Ceph on OpenShift, the Ceph walkthrough will configure the PSPs as well -when you start the operator with [operator-openshift.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/operator-openshift.yaml). +when you start the operator with [operator-openshift.yaml](https://github.com/rook/rook/blob/{{ branchName }}/deploy/examples/operator-openshift.yaml). Creating the Rook operator requires privileges for setting up RBAC. To launch the operator you need to have created your user certificate that is bound to ClusterRole `cluster-admin`. @@ -23,7 +23,7 @@ using [PodSecurityPolicies](https://kubernetes.io/docs/concepts/policy/pod-secur for the different `ServiceAccounts` Rook uses to start the Rook Storage Pods. Security policies will differ for different backends. See Ceph's Pod Security Policies set up in -[common.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/common.yaml) +[common.yaml](https://github.com/rook/rook/blob/{{ branchName }}/deploy/examples/common.yaml) for an example of how this is done in practice. ### PodSecurityPolicy diff --git a/Documentation/quickstart.md b/Documentation/quickstart.md index 39f8be7f2a6a..0cdb3f4c3139 100644 --- a/Documentation/quickstart.md +++ b/Documentation/quickstart.md @@ -31,11 +31,11 @@ In order to configure the Ceph storage cluster, at least one of these local stor ## TL;DR -A simple Rook cluster can be created with the following kubectl commands and [example manifests](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph). 
+A simple Rook cluster can be created with the following kubectl commands and [example manifests](https://github.com/rook/rook/blob/{{ branchName }}/deploy/examples). ```console $ git clone --single-branch --branch {{ branchName }} https://github.com/rook/rook.git -cd rook/cluster/examples/kubernetes/ceph +cd rook/deploy/examples kubectl create -f crds.yaml -f common.yaml -f operator.yaml kubectl create -f cluster.yaml ``` @@ -44,10 +44,10 @@ After the cluster is running, you can create [block, object, or file](#storage) ## Deploy the Rook Operator -The first step is to deploy the Rook operator. Check that you are using the [example yaml files](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph) that correspond to your release of Rook. For more options, see the [examples documentation](ceph-examples.md). +The first step is to deploy the Rook operator. Check that you are using the [example yaml files](https://github.com/rook/rook/blob/{{ branchName }}/deploy/examples) that correspond to your release of Rook. For more options, see the [examples documentation](ceph-examples.md). ```console -cd cluster/examples/kubernetes/ceph +cd deploy/examples kubectl create -f crds.yaml -f common.yaml -f operator.yaml # verify the rook-ceph-operator is in the `Running` state before proceeding @@ -57,7 +57,7 @@ kubectl -n rook-ceph get pod You can also deploy the operator with the [Rook Helm Chart](helm-operator.md). Before you start the operator in production, there are some settings that you may want to consider: -1. Consider if you want to enable certain Rook features that are disabled by default. See the [operator.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/operator.yaml) for these and other advanced settings. +1. Consider if you want to enable certain Rook features that are disabled by default. See the [operator.yaml](https://github.com/rook/rook/blob/{{ branchName }}/deploy/examples/operator.yaml) for these and other advanced settings. 1. Device discovery: Rook will watch for new devices to configure if the `ROOK_ENABLE_DISCOVERY_DAEMON` setting is enabled, commonly used in bare metal clusters. 2. Node affinity and tolerations: The CSI driver by default will run on any node in the cluster. To configure the CSI driver affinity, several settings are available. @@ -68,9 +68,10 @@ If you wish to deploy into a namespace other than the default `rook-ceph`, see t The Rook documentation is focused around starting Rook in a production environment. Examples are also provided to relax some settings for test environments. When creating the cluster later in this guide, consider these example cluster manifests: -- [cluster.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/cluster.yaml): Cluster settings for a production cluster running on bare metal. Requires at least three worker nodes. -- [cluster-on-pvc.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/cluster-on-pvc.yaml): Cluster settings for a production cluster running in a dynamic cloud environment. -- [cluster-test.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/cluster-test.yaml): Cluster settings for a test environment such as minikube. + +- [cluster.yaml](https://github.com/rook/rook/blob/{{ branchName }}/deploy/examples/cluster.yaml): Cluster settings for a production cluster running on bare metal. Requires at least three worker nodes. 
+- [cluster-on-pvc.yaml](https://github.com/rook/rook/blob/{{ branchName }}/deploy/examples/cluster-on-pvc.yaml): Cluster settings for a production cluster running in a dynamic cloud environment. +- [cluster-test.yaml](https://github.com/rook/rook/blob/{{ branchName }}/deploy/examples/cluster-test.yaml): Cluster settings for a test environment such as minikube. See the [Ceph examples](ceph-examples.md) for more details. diff --git a/Documentation/rbd-mirroring.md b/Documentation/rbd-mirroring.md index ba4d67dce8a3..507f3d7d3824 100644 --- a/Documentation/rbd-mirroring.md +++ b/Documentation/rbd-mirroring.md @@ -267,7 +267,7 @@ Below guide assumes that we have a PVC (rbd-pvc) in BOUND state; created using In this case, we create a Volume Replication Class on cluster-1 ```bash -[cluster-1]$ kubectl apply -f cluster/examples/kubernetes/ceph/volume-replication-class.yaml +[cluster-1]$ kubectl apply -f deploy/examples/volume-replication-class.yaml ``` > **Note:** The `schedulingInterval` can be specified in formats of @@ -281,7 +281,7 @@ In this case, we create a Volume Replication Class on cluster-1 the PVC which we intend to replicate to secondary cluster. ```bash -[cluster-1]$ kubectl apply -f cluster/examples/kubernetes/ceph/volume-replication.yaml +[cluster-1]$ kubectl apply -f deploy/examples/volume-replication.yaml ``` >:memo: *VolumeReplication* is a namespace scoped object. Thus, @@ -373,7 +373,7 @@ Here, we take a backup of PVC and PV object on one site, so that they can be res * Create VolumeReplicationClass on the secondary cluster ```bash -[cluster-1]$ kubectl apply -f cluster/examples/kubernetes/ceph/volume-replication-class.yaml +[cluster-1]$ kubectl apply -f deploy/examples/volume-replication-class.yaml ``` > ```bash diff --git a/PendingReleaseNotes.md b/PendingReleaseNotes.md index cb1233802d5e..3fd997564173 100644 --- a/PendingReleaseNotes.md +++ b/PendingReleaseNotes.md @@ -12,6 +12,9 @@ v1.8... you will need to convert them to csi volumes. See the flex conversion tool. - Min supported version of K8s is now 1.16. If running on an older version of K8s it is recommended to update to a newer version before updating to Rook v1.8. +- Directory structure of the YAML examples has changed. Files are now in `deploy/examples` and subdirectories. 
+ +### Ceph ## Features diff --git a/build/crds/build-crds.sh b/build/crds/build-crds.sh index 71ea95955503..89890476327e 100755 --- a/build/crds/build-crds.sh +++ b/build/crds/build-crds.sh @@ -32,9 +32,9 @@ if [[ -n "$BUILD_CRDS_INTO_DIR" ]]; then echo "Generating CRDs into dir $BUILD_CRDS_INTO_DIR" DESTINATION_ROOT="$BUILD_CRDS_INTO_DIR" fi -OLM_CATALOG_DIR="${DESTINATION_ROOT}/cluster/olm/ceph/deploy/crds" -CEPH_CRDS_FILE_PATH="${DESTINATION_ROOT}/cluster/examples/kubernetes/ceph/crds.yaml" -CEPH_HELM_CRDS_FILE_PATH="${DESTINATION_ROOT}/cluster/charts/rook-ceph/templates/resources.yaml" +OLM_CATALOG_DIR="${DESTINATION_ROOT}/deploy/olm/deploy/crds" +CEPH_CRDS_FILE_PATH="${DESTINATION_ROOT}/deploy/examples/crds.yaml" +CEPH_HELM_CRDS_FILE_PATH="${DESTINATION_ROOT}/deploy/charts/rook-ceph/templates/resources.yaml" ############# # FUNCTIONS # @@ -42,8 +42,8 @@ CEPH_HELM_CRDS_FILE_PATH="${DESTINATION_ROOT}/cluster/charts/rook-ceph/templates copy_ob_obc_crds() { mkdir -p "$OLM_CATALOG_DIR" - cp -f "${SCRIPT_ROOT}/cluster/olm/ceph/assemble/objectbucket.io_objectbucketclaims.yaml" "$OLM_CATALOG_DIR" - cp -f "${SCRIPT_ROOT}/cluster/olm/ceph/assemble/objectbucket.io_objectbuckets.yaml" "$OLM_CATALOG_DIR" + cp -f "${SCRIPT_ROOT}/deploy/olm/assemble/objectbucket.io_objectbucketclaims.yaml" "$OLM_CATALOG_DIR" + cp -f "${SCRIPT_ROOT}/deploy/olm/assemble/objectbucket.io_objectbuckets.yaml" "$OLM_CATALOG_DIR" } generating_crds_v1() { diff --git a/build/makelib/helm.mk b/build/makelib/helm.mk index baeea65d5bbd..e73848512000 100644 --- a/build/makelib/helm.mk +++ b/build/makelib/helm.mk @@ -16,7 +16,7 @@ HELM_CHARTS ?= rook-ceph rook-ceph-cluster HELM_BASE_URL ?= https://charts.rook.io HELM_S3_BUCKET ?= rook.chart -HELM_CHARTS_DIR ?= $(ROOT_DIR)/cluster/charts +HELM_CHARTS_DIR ?= $(ROOT_DIR)/deploy/charts HELM_OUTPUT_DIR ?= $(OUTPUT_DIR)/charts HELM_HOME := $(abspath $(CACHE_DIR)/helm) diff --git a/build/rbac/get-helm-rbac.sh b/build/rbac/get-helm-rbac.sh index 0e181ef540c8..d54464426591 100755 --- a/build/rbac/get-helm-rbac.sh +++ b/build/rbac/get-helm-rbac.sh @@ -6,7 +6,7 @@ set -eEuox pipefail SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" pushd "$SCRIPT_DIR" -${HELM} template ../../cluster/charts/rook-ceph \ +${HELM} template ../../deploy/charts/rook-ceph \ --namespace rook-ceph \ --set crds.enabled=false | ./keep-rbac-yaml.py > rbac.yaml diff --git a/cluster/olm/ceph/README.md b/cluster/olm/ceph/README.md deleted file mode 100644 index 865b6e3210ba..000000000000 --- a/cluster/olm/ceph/README.md +++ /dev/null @@ -1,24 +0,0 @@ -# Build Rook's CSV file - -Just run `make CSV_VERSION=1.0.0 csv-ceph` like this: - -```console -make csv-ceph CSV_VERSION=1.0.1 CSV_PLATFORM=k8s ROOK_OP_VERSION=rook/ceph:v1.0.1 -``` - ->``` ->INFO[0000] Generating CSV manifest version 1.0.1 ->INFO[0000] Fill in the following required fields in file deploy/olm-catalog/ceph.csv.yaml: -> spec.keywords -> spec.maintainers -> spec.provider -> spec.labels ->INFO[0000] Create deploy/olm-catalog/ceph.csv.yaml ->INFO[0000] Create deploy/olm-catalog/_generated.concat_crd.yaml -> ->Congratulations! ->Your Rook CSV 1.0.1 file for k8s is ready at: cluster/olm/ceph/deploy/olm-catalog/rook-ceph.v1.0.1.clusterserviceversion.yaml ->Push it to https://github.com/operator-framework/community-operators as well as the CRDs files from cluster/olm/ceph/deploy/crds and the package file cluster/olm/ceph/assemble/rook-ceph.package.yaml. ->``` - -Or for OpenShift use: `make CSV_VERSION=1.0.0 CSV_PLATFORM=ocp csv-ceph`. 
diff --git a/cluster/charts/library/Chart.yaml b/deploy/charts/library/Chart.yaml similarity index 100% rename from cluster/charts/library/Chart.yaml rename to deploy/charts/library/Chart.yaml diff --git a/cluster/charts/library/templates/_cluster-clusterrolebinding.tpl b/deploy/charts/library/templates/_cluster-clusterrolebinding.tpl similarity index 100% rename from cluster/charts/library/templates/_cluster-clusterrolebinding.tpl rename to deploy/charts/library/templates/_cluster-clusterrolebinding.tpl diff --git a/cluster/charts/library/templates/_cluster-monitoring.tpl b/deploy/charts/library/templates/_cluster-monitoring.tpl similarity index 100% rename from cluster/charts/library/templates/_cluster-monitoring.tpl rename to deploy/charts/library/templates/_cluster-monitoring.tpl diff --git a/cluster/charts/library/templates/_cluster-psp.tpl b/deploy/charts/library/templates/_cluster-psp.tpl similarity index 100% rename from cluster/charts/library/templates/_cluster-psp.tpl rename to deploy/charts/library/templates/_cluster-psp.tpl diff --git a/cluster/charts/library/templates/_cluster-role.tpl b/deploy/charts/library/templates/_cluster-role.tpl similarity index 100% rename from cluster/charts/library/templates/_cluster-role.tpl rename to deploy/charts/library/templates/_cluster-role.tpl diff --git a/cluster/charts/library/templates/_cluster-rolebinding.tpl b/deploy/charts/library/templates/_cluster-rolebinding.tpl similarity index 100% rename from cluster/charts/library/templates/_cluster-rolebinding.tpl rename to deploy/charts/library/templates/_cluster-rolebinding.tpl diff --git a/cluster/charts/library/templates/_cluster-serviceaccount.tpl b/deploy/charts/library/templates/_cluster-serviceaccount.tpl similarity index 100% rename from cluster/charts/library/templates/_cluster-serviceaccount.tpl rename to deploy/charts/library/templates/_cluster-serviceaccount.tpl diff --git a/cluster/charts/library/templates/_imagepullsecret.tpl b/deploy/charts/library/templates/_imagepullsecret.tpl similarity index 100% rename from cluster/charts/library/templates/_imagepullsecret.tpl rename to deploy/charts/library/templates/_imagepullsecret.tpl diff --git a/cluster/charts/library/templates/_suffix-cluster-namespace.tpl b/deploy/charts/library/templates/_suffix-cluster-namespace.tpl similarity index 100% rename from cluster/charts/library/templates/_suffix-cluster-namespace.tpl rename to deploy/charts/library/templates/_suffix-cluster-namespace.tpl diff --git a/cluster/charts/rook-ceph-cluster/.helmignore b/deploy/charts/rook-ceph-cluster/.helmignore similarity index 100% rename from cluster/charts/rook-ceph-cluster/.helmignore rename to deploy/charts/rook-ceph-cluster/.helmignore diff --git a/cluster/charts/rook-ceph-cluster/Chart.yaml b/deploy/charts/rook-ceph-cluster/Chart.yaml similarity index 100% rename from cluster/charts/rook-ceph-cluster/Chart.yaml rename to deploy/charts/rook-ceph-cluster/Chart.yaml diff --git a/cluster/charts/rook-ceph-cluster/README.md b/deploy/charts/rook-ceph-cluster/README.md similarity index 100% rename from cluster/charts/rook-ceph-cluster/README.md rename to deploy/charts/rook-ceph-cluster/README.md diff --git a/cluster/charts/rook-ceph-cluster/charts/library b/deploy/charts/rook-ceph-cluster/charts/library similarity index 100% rename from cluster/charts/rook-ceph-cluster/charts/library rename to deploy/charts/rook-ceph-cluster/charts/library diff --git a/cluster/charts/rook-ceph-cluster/templates/NOTES.txt b/deploy/charts/rook-ceph-cluster/templates/NOTES.txt 
similarity index 100% rename from cluster/charts/rook-ceph-cluster/templates/NOTES.txt rename to deploy/charts/rook-ceph-cluster/templates/NOTES.txt diff --git a/cluster/charts/rook-ceph-cluster/templates/_helpers.tpl b/deploy/charts/rook-ceph-cluster/templates/_helpers.tpl similarity index 100% rename from cluster/charts/rook-ceph-cluster/templates/_helpers.tpl rename to deploy/charts/rook-ceph-cluster/templates/_helpers.tpl diff --git a/cluster/charts/rook-ceph-cluster/templates/cephblockpool.yaml b/deploy/charts/rook-ceph-cluster/templates/cephblockpool.yaml similarity index 100% rename from cluster/charts/rook-ceph-cluster/templates/cephblockpool.yaml rename to deploy/charts/rook-ceph-cluster/templates/cephblockpool.yaml diff --git a/cluster/charts/rook-ceph-cluster/templates/cephcluster.yaml b/deploy/charts/rook-ceph-cluster/templates/cephcluster.yaml similarity index 100% rename from cluster/charts/rook-ceph-cluster/templates/cephcluster.yaml rename to deploy/charts/rook-ceph-cluster/templates/cephcluster.yaml diff --git a/cluster/charts/rook-ceph-cluster/templates/cephfilesystem.yaml b/deploy/charts/rook-ceph-cluster/templates/cephfilesystem.yaml similarity index 100% rename from cluster/charts/rook-ceph-cluster/templates/cephfilesystem.yaml rename to deploy/charts/rook-ceph-cluster/templates/cephfilesystem.yaml diff --git a/cluster/charts/rook-ceph-cluster/templates/cephobjectstore.yaml b/deploy/charts/rook-ceph-cluster/templates/cephobjectstore.yaml similarity index 100% rename from cluster/charts/rook-ceph-cluster/templates/cephobjectstore.yaml rename to deploy/charts/rook-ceph-cluster/templates/cephobjectstore.yaml diff --git a/cluster/charts/rook-ceph-cluster/templates/configmap.yaml b/deploy/charts/rook-ceph-cluster/templates/configmap.yaml similarity index 100% rename from cluster/charts/rook-ceph-cluster/templates/configmap.yaml rename to deploy/charts/rook-ceph-cluster/templates/configmap.yaml diff --git a/cluster/charts/rook-ceph-cluster/templates/deployment.yaml b/deploy/charts/rook-ceph-cluster/templates/deployment.yaml similarity index 100% rename from cluster/charts/rook-ceph-cluster/templates/deployment.yaml rename to deploy/charts/rook-ceph-cluster/templates/deployment.yaml diff --git a/cluster/charts/rook-ceph-cluster/templates/ingress.yaml b/deploy/charts/rook-ceph-cluster/templates/ingress.yaml similarity index 100% rename from cluster/charts/rook-ceph-cluster/templates/ingress.yaml rename to deploy/charts/rook-ceph-cluster/templates/ingress.yaml diff --git a/cluster/charts/rook-ceph-cluster/templates/rbac.yaml b/deploy/charts/rook-ceph-cluster/templates/rbac.yaml similarity index 100% rename from cluster/charts/rook-ceph-cluster/templates/rbac.yaml rename to deploy/charts/rook-ceph-cluster/templates/rbac.yaml diff --git a/cluster/charts/rook-ceph-cluster/values.yaml b/deploy/charts/rook-ceph-cluster/values.yaml similarity index 100% rename from cluster/charts/rook-ceph-cluster/values.yaml rename to deploy/charts/rook-ceph-cluster/values.yaml diff --git a/cluster/charts/rook-ceph/.helmignore b/deploy/charts/rook-ceph/.helmignore similarity index 100% rename from cluster/charts/rook-ceph/.helmignore rename to deploy/charts/rook-ceph/.helmignore diff --git a/cluster/charts/rook-ceph/Chart.yaml b/deploy/charts/rook-ceph/Chart.yaml similarity index 100% rename from cluster/charts/rook-ceph/Chart.yaml rename to deploy/charts/rook-ceph/Chart.yaml diff --git a/cluster/charts/rook-ceph/README.md b/deploy/charts/rook-ceph/README.md similarity index 100% rename from 
cluster/charts/rook-ceph/README.md rename to deploy/charts/rook-ceph/README.md diff --git a/cluster/charts/rook-ceph/charts/library b/deploy/charts/rook-ceph/charts/library similarity index 100% rename from cluster/charts/rook-ceph/charts/library rename to deploy/charts/rook-ceph/charts/library diff --git a/cluster/charts/rook-ceph/templates/NOTES.txt b/deploy/charts/rook-ceph/templates/NOTES.txt similarity index 100% rename from cluster/charts/rook-ceph/templates/NOTES.txt rename to deploy/charts/rook-ceph/templates/NOTES.txt diff --git a/cluster/charts/rook-ceph/templates/cluster-rbac.yaml b/deploy/charts/rook-ceph/templates/cluster-rbac.yaml similarity index 100% rename from cluster/charts/rook-ceph/templates/cluster-rbac.yaml rename to deploy/charts/rook-ceph/templates/cluster-rbac.yaml diff --git a/cluster/charts/rook-ceph/templates/clusterrole.yaml b/deploy/charts/rook-ceph/templates/clusterrole.yaml similarity index 100% rename from cluster/charts/rook-ceph/templates/clusterrole.yaml rename to deploy/charts/rook-ceph/templates/clusterrole.yaml diff --git a/cluster/charts/rook-ceph/templates/clusterrolebinding.yaml b/deploy/charts/rook-ceph/templates/clusterrolebinding.yaml similarity index 100% rename from cluster/charts/rook-ceph/templates/clusterrolebinding.yaml rename to deploy/charts/rook-ceph/templates/clusterrolebinding.yaml diff --git a/cluster/charts/rook-ceph/templates/deployment.yaml b/deploy/charts/rook-ceph/templates/deployment.yaml similarity index 100% rename from cluster/charts/rook-ceph/templates/deployment.yaml rename to deploy/charts/rook-ceph/templates/deployment.yaml diff --git a/cluster/charts/rook-ceph/templates/psp.yaml b/deploy/charts/rook-ceph/templates/psp.yaml similarity index 100% rename from cluster/charts/rook-ceph/templates/psp.yaml rename to deploy/charts/rook-ceph/templates/psp.yaml diff --git a/cluster/charts/rook-ceph/templates/resources.yaml b/deploy/charts/rook-ceph/templates/resources.yaml similarity index 100% rename from cluster/charts/rook-ceph/templates/resources.yaml rename to deploy/charts/rook-ceph/templates/resources.yaml diff --git a/cluster/charts/rook-ceph/templates/role.yaml b/deploy/charts/rook-ceph/templates/role.yaml similarity index 100% rename from cluster/charts/rook-ceph/templates/role.yaml rename to deploy/charts/rook-ceph/templates/role.yaml diff --git a/cluster/charts/rook-ceph/templates/rolebinding.yaml b/deploy/charts/rook-ceph/templates/rolebinding.yaml similarity index 100% rename from cluster/charts/rook-ceph/templates/rolebinding.yaml rename to deploy/charts/rook-ceph/templates/rolebinding.yaml diff --git a/cluster/charts/rook-ceph/templates/serviceaccount.yaml b/deploy/charts/rook-ceph/templates/serviceaccount.yaml similarity index 100% rename from cluster/charts/rook-ceph/templates/serviceaccount.yaml rename to deploy/charts/rook-ceph/templates/serviceaccount.yaml diff --git a/cluster/charts/rook-ceph/values.yaml b/deploy/charts/rook-ceph/values.yaml similarity index 99% rename from cluster/charts/rook-ceph/values.yaml rename to deploy/charts/rook-ceph/values.yaml index 5d5e4d675f9d..a13b7bae1444 100644 --- a/cluster/charts/rook-ceph/values.yaml +++ b/deploy/charts/rook-ceph/values.yaml @@ -10,7 +10,7 @@ image: crds: # Whether the helm chart should create and update the CRDs. If false, the CRDs must be - # managed independently with cluster/examples/kubernetes/ceph/crds.yaml. + # managed independently with deploy/examples/crds.yaml. # **WARNING** Only set during first deployment. 
If later disabled the cluster may be DESTROYED. # If the CRDs are deleted in this case, see the disaster recovery guide to restore them. # https://rook.github.io/docs/rook/latest/ceph-disaster-recovery.html#restoring-crds-after-deletion diff --git a/cluster/examples/kubernetes/README.md b/deploy/examples/README.md similarity index 100% rename from cluster/examples/kubernetes/README.md rename to deploy/examples/README.md diff --git a/cluster/examples/kubernetes/ceph/bucket-notification-endpoint.yaml b/deploy/examples/bucket-notification-endpoint.yaml similarity index 100% rename from cluster/examples/kubernetes/ceph/bucket-notification-endpoint.yaml rename to deploy/examples/bucket-notification-endpoint.yaml diff --git a/cluster/examples/kubernetes/ceph/bucket-notification.yaml b/deploy/examples/bucket-notification.yaml similarity index 100% rename from cluster/examples/kubernetes/ceph/bucket-notification.yaml rename to deploy/examples/bucket-notification.yaml diff --git a/cluster/examples/kubernetes/ceph/bucket-topic.yaml b/deploy/examples/bucket-topic.yaml similarity index 100% rename from cluster/examples/kubernetes/ceph/bucket-topic.yaml rename to deploy/examples/bucket-topic.yaml diff --git a/cluster/examples/kubernetes/ceph/ceph-client.yaml b/deploy/examples/ceph-client.yaml similarity index 100% rename from cluster/examples/kubernetes/ceph/ceph-client.yaml rename to deploy/examples/ceph-client.yaml diff --git a/cluster/examples/kubernetes/ceph/cluster-external-management.yaml b/deploy/examples/cluster-external-management.yaml similarity index 100% rename from cluster/examples/kubernetes/ceph/cluster-external-management.yaml rename to deploy/examples/cluster-external-management.yaml diff --git a/cluster/examples/kubernetes/ceph/cluster-external.yaml b/deploy/examples/cluster-external.yaml similarity index 100% rename from cluster/examples/kubernetes/ceph/cluster-external.yaml rename to deploy/examples/cluster-external.yaml diff --git a/cluster/examples/kubernetes/ceph/cluster-on-local-pvc.yaml b/deploy/examples/cluster-on-local-pvc.yaml similarity index 100% rename from cluster/examples/kubernetes/ceph/cluster-on-local-pvc.yaml rename to deploy/examples/cluster-on-local-pvc.yaml diff --git a/cluster/examples/kubernetes/ceph/cluster-on-pvc.yaml b/deploy/examples/cluster-on-pvc.yaml similarity index 100% rename from cluster/examples/kubernetes/ceph/cluster-on-pvc.yaml rename to deploy/examples/cluster-on-pvc.yaml diff --git a/cluster/examples/kubernetes/ceph/cluster-stretched-aws.yaml b/deploy/examples/cluster-stretched-aws.yaml similarity index 100% rename from cluster/examples/kubernetes/ceph/cluster-stretched-aws.yaml rename to deploy/examples/cluster-stretched-aws.yaml diff --git a/cluster/examples/kubernetes/ceph/cluster-stretched.yaml b/deploy/examples/cluster-stretched.yaml similarity index 100% rename from cluster/examples/kubernetes/ceph/cluster-stretched.yaml rename to deploy/examples/cluster-stretched.yaml diff --git a/cluster/examples/kubernetes/ceph/cluster-test.yaml b/deploy/examples/cluster-test.yaml similarity index 100% rename from cluster/examples/kubernetes/ceph/cluster-test.yaml rename to deploy/examples/cluster-test.yaml diff --git a/cluster/examples/kubernetes/ceph/cluster.yaml b/deploy/examples/cluster.yaml similarity index 100% rename from cluster/examples/kubernetes/ceph/cluster.yaml rename to deploy/examples/cluster.yaml diff --git a/cluster/examples/kubernetes/ceph/common-external.yaml b/deploy/examples/common-external.yaml similarity index 100% rename from 
cluster/examples/kubernetes/ceph/common-external.yaml
rename to deploy/examples/common-external.yaml
diff --git a/cluster/examples/kubernetes/ceph/common-second-cluster.yaml b/deploy/examples/common-second-cluster.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/common-second-cluster.yaml
rename to deploy/examples/common-second-cluster.yaml
diff --git a/cluster/examples/kubernetes/ceph/common.yaml b/deploy/examples/common.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/common.yaml
rename to deploy/examples/common.yaml
diff --git a/cluster/examples/kubernetes/ceph/crds.yaml b/deploy/examples/crds.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/crds.yaml
rename to deploy/examples/crds.yaml
diff --git a/cluster/examples/kubernetes/ceph/create-external-cluster-resources.py b/deploy/examples/create-external-cluster-resources.py
similarity index 100%
rename from cluster/examples/kubernetes/ceph/create-external-cluster-resources.py
rename to deploy/examples/create-external-cluster-resources.py
diff --git a/cluster/examples/kubernetes/ceph/create-external-cluster-resources.sh b/deploy/examples/create-external-cluster-resources.sh
similarity index 97%
rename from cluster/examples/kubernetes/ceph/create-external-cluster-resources.sh
rename to deploy/examples/create-external-cluster-resources.sh
index ad6e31d80978..cf2d9f11d49d 100644
--- a/cluster/examples/kubernetes/ceph/create-external-cluster-resources.sh
+++ b/deploy/examples/create-external-cluster-resources.sh
@@ -2,7 +2,7 @@
 # this script creates all the users/keys on the external cluster
 # those keys will be injected via the import-external-cluster.sh once this one is done running
 # so you can run import-external-cluster.sh right after this script
-# run me like: . cluster/examples/kubernetes/ceph/create-external-cluster-resources.sh
+# run me like: . deploy/examples/create-external-cluster-resources.sh
 set -e
 
 #############
@@ -26,12 +26,12 @@ function checkEnv() {
     echo "'ceph' binary is expected'"
     return 1
   fi
-  
+
   if ! is_available jq; then
     echo "'jq' binary is expected'"
     return 1
   fi
-  
+
   if ! ceph -s 1>/dev/null; then
     echo "cannot connect to the ceph cluster"
     return 1
diff --git a/cluster/examples/kubernetes/ceph/csi/cephfs/kube-registry.yaml b/deploy/examples/csi/cephfs/kube-registry.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/csi/cephfs/kube-registry.yaml
rename to deploy/examples/csi/cephfs/kube-registry.yaml
diff --git a/cluster/examples/kubernetes/ceph/csi/cephfs/pod-ephemeral.yaml b/deploy/examples/csi/cephfs/pod-ephemeral.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/csi/cephfs/pod-ephemeral.yaml
rename to deploy/examples/csi/cephfs/pod-ephemeral.yaml
diff --git a/cluster/examples/kubernetes/ceph/csi/cephfs/pod.yaml b/deploy/examples/csi/cephfs/pod.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/csi/cephfs/pod.yaml
rename to deploy/examples/csi/cephfs/pod.yaml
diff --git a/cluster/examples/kubernetes/ceph/csi/cephfs/pvc-clone.yaml b/deploy/examples/csi/cephfs/pvc-clone.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/csi/cephfs/pvc-clone.yaml
rename to deploy/examples/csi/cephfs/pvc-clone.yaml
diff --git a/cluster/examples/kubernetes/ceph/csi/cephfs/pvc-restore.yaml b/deploy/examples/csi/cephfs/pvc-restore.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/csi/cephfs/pvc-restore.yaml
rename to deploy/examples/csi/cephfs/pvc-restore.yaml
diff --git a/cluster/examples/kubernetes/ceph/csi/cephfs/pvc.yaml b/deploy/examples/csi/cephfs/pvc.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/csi/cephfs/pvc.yaml
rename to deploy/examples/csi/cephfs/pvc.yaml
diff --git a/cluster/examples/kubernetes/ceph/csi/cephfs/snapshot.yaml b/deploy/examples/csi/cephfs/snapshot.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/csi/cephfs/snapshot.yaml
rename to deploy/examples/csi/cephfs/snapshot.yaml
diff --git a/cluster/examples/kubernetes/ceph/csi/cephfs/snapshotclass.yaml b/deploy/examples/csi/cephfs/snapshotclass.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/csi/cephfs/snapshotclass.yaml
rename to deploy/examples/csi/cephfs/snapshotclass.yaml
diff --git a/cluster/examples/kubernetes/ceph/csi/cephfs/storageclass-ec.yaml b/deploy/examples/csi/cephfs/storageclass-ec.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/csi/cephfs/storageclass-ec.yaml
rename to deploy/examples/csi/cephfs/storageclass-ec.yaml
diff --git a/cluster/examples/kubernetes/ceph/csi/cephfs/storageclass.yaml b/deploy/examples/csi/cephfs/storageclass.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/csi/cephfs/storageclass.yaml
rename to deploy/examples/csi/cephfs/storageclass.yaml
diff --git a/cluster/examples/kubernetes/ceph/csi/rbd/pod-ephemeral.yaml b/deploy/examples/csi/rbd/pod-ephemeral.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/csi/rbd/pod-ephemeral.yaml
rename to deploy/examples/csi/rbd/pod-ephemeral.yaml
diff --git a/cluster/examples/kubernetes/ceph/csi/rbd/pod.yaml b/deploy/examples/csi/rbd/pod.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/csi/rbd/pod.yaml
rename to deploy/examples/csi/rbd/pod.yaml
diff --git a/cluster/examples/kubernetes/ceph/csi/rbd/pvc-clone.yaml b/deploy/examples/csi/rbd/pvc-clone.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/csi/rbd/pvc-clone.yaml
rename to deploy/examples/csi/rbd/pvc-clone.yaml
diff --git a/cluster/examples/kubernetes/ceph/csi/rbd/pvc-restore.yaml b/deploy/examples/csi/rbd/pvc-restore.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/csi/rbd/pvc-restore.yaml
rename to deploy/examples/csi/rbd/pvc-restore.yaml
diff --git a/cluster/examples/kubernetes/ceph/csi/rbd/pvc.yaml b/deploy/examples/csi/rbd/pvc.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/csi/rbd/pvc.yaml
rename to deploy/examples/csi/rbd/pvc.yaml
diff --git a/cluster/examples/kubernetes/ceph/csi/rbd/snapshot.yaml b/deploy/examples/csi/rbd/snapshot.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/csi/rbd/snapshot.yaml
rename to deploy/examples/csi/rbd/snapshot.yaml
diff --git a/cluster/examples/kubernetes/ceph/csi/rbd/snapshotclass.yaml b/deploy/examples/csi/rbd/snapshotclass.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/csi/rbd/snapshotclass.yaml
rename to deploy/examples/csi/rbd/snapshotclass.yaml
diff --git a/cluster/examples/kubernetes/ceph/csi/rbd/storageclass-ec.yaml b/deploy/examples/csi/rbd/storageclass-ec.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/csi/rbd/storageclass-ec.yaml
rename to deploy/examples/csi/rbd/storageclass-ec.yaml
diff --git a/cluster/examples/kubernetes/ceph/csi/rbd/storageclass-test.yaml b/deploy/examples/csi/rbd/storageclass-test.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/csi/rbd/storageclass-test.yaml
rename to deploy/examples/csi/rbd/storageclass-test.yaml
diff --git a/cluster/examples/kubernetes/ceph/csi/rbd/storageclass.yaml b/deploy/examples/csi/rbd/storageclass.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/csi/rbd/storageclass.yaml
rename to deploy/examples/csi/rbd/storageclass.yaml
diff --git a/cluster/examples/kubernetes/ceph/dashboard-external-http.yaml b/deploy/examples/dashboard-external-http.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/dashboard-external-http.yaml
rename to deploy/examples/dashboard-external-http.yaml
diff --git a/cluster/examples/kubernetes/ceph/dashboard-external-https.yaml b/deploy/examples/dashboard-external-https.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/dashboard-external-https.yaml
rename to deploy/examples/dashboard-external-https.yaml
diff --git a/cluster/examples/kubernetes/ceph/dashboard-ingress-https.yaml b/deploy/examples/dashboard-ingress-https.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/dashboard-ingress-https.yaml
rename to deploy/examples/dashboard-ingress-https.yaml
diff --git a/cluster/examples/kubernetes/ceph/dashboard-loadbalancer.yaml b/deploy/examples/dashboard-loadbalancer.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/dashboard-loadbalancer.yaml
rename to deploy/examples/dashboard-loadbalancer.yaml
diff --git a/cluster/examples/kubernetes/ceph/direct-mount.yaml b/deploy/examples/direct-mount.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/direct-mount.yaml
rename to deploy/examples/direct-mount.yaml
diff --git a/cluster/examples/kubernetes/ceph/filesystem-ec.yaml b/deploy/examples/filesystem-ec.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/filesystem-ec.yaml
rename to deploy/examples/filesystem-ec.yaml
diff --git a/cluster/examples/kubernetes/ceph/filesystem-mirror.yaml b/deploy/examples/filesystem-mirror.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/filesystem-mirror.yaml
rename to deploy/examples/filesystem-mirror.yaml
diff --git a/cluster/examples/kubernetes/ceph/filesystem-test.yaml b/deploy/examples/filesystem-test.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/filesystem-test.yaml
rename to deploy/examples/filesystem-test.yaml
diff --git a/cluster/examples/kubernetes/ceph/filesystem.yaml b/deploy/examples/filesystem.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/filesystem.yaml
rename to deploy/examples/filesystem.yaml
diff --git a/cluster/examples/kubernetes/ceph/images.txt b/deploy/examples/images.txt
similarity index 100%
rename from cluster/examples/kubernetes/ceph/images.txt
rename to deploy/examples/images.txt
diff --git a/cluster/examples/kubernetes/ceph/import-external-cluster.sh b/deploy/examples/import-external-cluster.sh
similarity index 100%
rename from cluster/examples/kubernetes/ceph/import-external-cluster.sh
rename to deploy/examples/import-external-cluster.sh
diff --git a/cluster/examples/kubernetes/ceph/monitoring/csi-metrics-service-monitor.yaml b/deploy/examples/monitoring/csi-metrics-service-monitor.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/monitoring/csi-metrics-service-monitor.yaml
rename to deploy/examples/monitoring/csi-metrics-service-monitor.yaml
diff --git a/cluster/examples/kubernetes/ceph/monitoring/keda-rgw.yaml b/deploy/examples/monitoring/keda-rgw.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/monitoring/keda-rgw.yaml
rename to deploy/examples/monitoring/keda-rgw.yaml
diff --git a/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v14-rules-external.yaml b/deploy/examples/monitoring/prometheus-ceph-v14-rules-external.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v14-rules-external.yaml
rename to deploy/examples/monitoring/prometheus-ceph-v14-rules-external.yaml
diff --git a/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v14-rules.yaml b/deploy/examples/monitoring/prometheus-ceph-v14-rules.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v14-rules.yaml
rename to deploy/examples/monitoring/prometheus-ceph-v14-rules.yaml
diff --git a/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v15-rules-external.yaml b/deploy/examples/monitoring/prometheus-ceph-v15-rules-external.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v15-rules-external.yaml
rename to deploy/examples/monitoring/prometheus-ceph-v15-rules-external.yaml
diff --git a/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v15-rules.yaml b/deploy/examples/monitoring/prometheus-ceph-v15-rules.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v15-rules.yaml
rename to deploy/examples/monitoring/prometheus-ceph-v15-rules.yaml
diff --git a/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v16-rules-external.yaml b/deploy/examples/monitoring/prometheus-ceph-v16-rules-external.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v16-rules-external.yaml
rename to deploy/examples/monitoring/prometheus-ceph-v16-rules-external.yaml
diff --git a/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v16-rules.yaml b/deploy/examples/monitoring/prometheus-ceph-v16-rules.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v16-rules.yaml
rename to deploy/examples/monitoring/prometheus-ceph-v16-rules.yaml
diff --git a/cluster/examples/kubernetes/ceph/monitoring/prometheus-service.yaml b/deploy/examples/monitoring/prometheus-service.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/monitoring/prometheus-service.yaml
rename to deploy/examples/monitoring/prometheus-service.yaml
diff --git a/cluster/examples/kubernetes/ceph/monitoring/prometheus.yaml b/deploy/examples/monitoring/prometheus.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/monitoring/prometheus.yaml
rename to deploy/examples/monitoring/prometheus.yaml
diff --git a/cluster/examples/kubernetes/ceph/monitoring/rbac.yaml b/deploy/examples/monitoring/rbac.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/monitoring/rbac.yaml
rename to deploy/examples/monitoring/rbac.yaml
diff --git a/cluster/examples/kubernetes/ceph/monitoring/service-monitor.yaml b/deploy/examples/monitoring/service-monitor.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/monitoring/service-monitor.yaml
rename to deploy/examples/monitoring/service-monitor.yaml
diff --git a/cluster/examples/kubernetes/mysql.yaml b/deploy/examples/mysql.yaml
similarity index 100%
rename from cluster/examples/kubernetes/mysql.yaml
rename to deploy/examples/mysql.yaml
diff --git a/cluster/examples/kubernetes/ceph/nfs-test.yaml b/deploy/examples/nfs-test.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/nfs-test.yaml
rename to deploy/examples/nfs-test.yaml
diff --git a/cluster/examples/kubernetes/ceph/nfs.yaml b/deploy/examples/nfs.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/nfs.yaml
rename to deploy/examples/nfs.yaml
diff --git a/cluster/examples/kubernetes/ceph/object-bucket-claim-delete.yaml b/deploy/examples/object-bucket-claim-delete.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/object-bucket-claim-delete.yaml
rename to deploy/examples/object-bucket-claim-delete.yaml
diff --git a/cluster/examples/kubernetes/ceph/object-bucket-claim-notification.yaml b/deploy/examples/object-bucket-claim-notification.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/object-bucket-claim-notification.yaml
rename to deploy/examples/object-bucket-claim-notification.yaml
diff --git a/cluster/examples/kubernetes/ceph/object-bucket-claim-retain.yaml b/deploy/examples/object-bucket-claim-retain.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/object-bucket-claim-retain.yaml
rename to deploy/examples/object-bucket-claim-retain.yaml
diff --git a/cluster/examples/kubernetes/ceph/object-ec.yaml b/deploy/examples/object-ec.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/object-ec.yaml
rename to deploy/examples/object-ec.yaml
diff --git a/cluster/examples/kubernetes/ceph/object-external.yaml b/deploy/examples/object-external.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/object-external.yaml
rename to deploy/examples/object-external.yaml
diff --git a/cluster/examples/kubernetes/ceph/object-multisite-pull-realm-test.yaml b/deploy/examples/object-multisite-pull-realm-test.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/object-multisite-pull-realm-test.yaml
rename to deploy/examples/object-multisite-pull-realm-test.yaml
diff --git a/cluster/examples/kubernetes/ceph/object-multisite-pull-realm.yaml b/deploy/examples/object-multisite-pull-realm.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/object-multisite-pull-realm.yaml
rename to deploy/examples/object-multisite-pull-realm.yaml
diff --git a/cluster/examples/kubernetes/ceph/object-multisite-test.yaml b/deploy/examples/object-multisite-test.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/object-multisite-test.yaml
rename to deploy/examples/object-multisite-test.yaml
diff --git a/cluster/examples/kubernetes/ceph/object-multisite.yaml b/deploy/examples/object-multisite.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/object-multisite.yaml
rename to deploy/examples/object-multisite.yaml
diff --git a/cluster/examples/kubernetes/ceph/object-openshift.yaml b/deploy/examples/object-openshift.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/object-openshift.yaml
rename to deploy/examples/object-openshift.yaml
diff --git a/cluster/examples/kubernetes/ceph/object-test.yaml b/deploy/examples/object-test.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/object-test.yaml
rename to deploy/examples/object-test.yaml
diff --git a/cluster/examples/kubernetes/ceph/object-user.yaml b/deploy/examples/object-user.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/object-user.yaml
rename to deploy/examples/object-user.yaml
diff --git a/cluster/examples/kubernetes/ceph/object.yaml b/deploy/examples/object.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/object.yaml
rename to deploy/examples/object.yaml
diff --git a/cluster/examples/kubernetes/ceph/operator-openshift.yaml b/deploy/examples/operator-openshift.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/operator-openshift.yaml
rename to deploy/examples/operator-openshift.yaml
diff --git a/cluster/examples/kubernetes/ceph/operator.yaml b/deploy/examples/operator.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/operator.yaml
rename to deploy/examples/operator.yaml
diff --git a/cluster/examples/kubernetes/ceph/osd-purge.yaml b/deploy/examples/osd-purge.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/osd-purge.yaml
rename to deploy/examples/osd-purge.yaml
diff --git a/cluster/examples/kubernetes/ceph/pool-ec.yaml b/deploy/examples/pool-ec.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/pool-ec.yaml
rename to deploy/examples/pool-ec.yaml
diff --git a/cluster/examples/kubernetes/ceph/pool-mirrored.yaml b/deploy/examples/pool-mirrored.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/pool-mirrored.yaml
rename to deploy/examples/pool-mirrored.yaml
diff --git a/cluster/examples/kubernetes/ceph/pool-test.yaml b/deploy/examples/pool-test.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/pool-test.yaml
rename to deploy/examples/pool-test.yaml
diff --git a/cluster/examples/kubernetes/ceph/pool.yaml b/deploy/examples/pool.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/pool.yaml
rename to deploy/examples/pool.yaml
diff --git a/cluster/examples/kubernetes/ceph/rbdmirror.yaml b/deploy/examples/rbdmirror.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/rbdmirror.yaml
rename to deploy/examples/rbdmirror.yaml
diff --git a/cluster/examples/kubernetes/ceph/rgw-external.yaml b/deploy/examples/rgw-external.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/rgw-external.yaml
rename to deploy/examples/rgw-external.yaml
diff --git a/cluster/examples/kubernetes/ceph/storageclass-bucket-delete.yaml b/deploy/examples/storageclass-bucket-delete.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/storageclass-bucket-delete.yaml
rename to deploy/examples/storageclass-bucket-delete.yaml
diff --git a/cluster/examples/kubernetes/ceph/storageclass-bucket-retain.yaml b/deploy/examples/storageclass-bucket-retain.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/storageclass-bucket-retain.yaml
rename to deploy/examples/storageclass-bucket-retain.yaml
diff --git a/cluster/examples/kubernetes/ceph/toolbox-job.yaml b/deploy/examples/toolbox-job.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/toolbox-job.yaml
rename to deploy/examples/toolbox-job.yaml
diff --git a/cluster/examples/kubernetes/ceph/toolbox.yaml b/deploy/examples/toolbox.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/toolbox.yaml
rename to deploy/examples/toolbox.yaml
diff --git a/cluster/examples/kubernetes/ceph/volume-replication-class.yaml b/deploy/examples/volume-replication-class.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/volume-replication-class.yaml
rename to deploy/examples/volume-replication-class.yaml
diff --git a/cluster/examples/kubernetes/ceph/volume-replication.yaml b/deploy/examples/volume-replication.yaml
similarity index 100%
rename from cluster/examples/kubernetes/ceph/volume-replication.yaml
rename to deploy/examples/volume-replication.yaml
diff --git a/cluster/examples/kubernetes/wordpress.yaml b/deploy/examples/wordpress.yaml
similarity index 100%
rename from cluster/examples/kubernetes/wordpress.yaml
rename to deploy/examples/wordpress.yaml
diff --git a/deploy/olm/README.md b/deploy/olm/README.md
new file mode 100644
index 000000000000..d8003698645b
--- /dev/null
+++ b/deploy/olm/README.md
@@ -0,0 +1,24 @@
+# Build Rook's CSV file
+
+Run the `csv-ceph` target, pinning the desired version, like this:
+
+```console
+make csv-ceph CSV_VERSION=1.0.1 CSV_PLATFORM=k8s ROOK_OP_VERSION=rook/ceph:v1.0.1
+```
+
+> ```
+> INFO[0000] Generating CSV manifest version 1.0.1
+> INFO[0000] Fill in the following required fields in file deploy/olm-catalog/ceph.csv.yaml:
+>    spec.keywords
+>    spec.maintainers
+>    spec.provider
+>    spec.labels
+> INFO[0000] Create deploy/olm-catalog/ceph.csv.yaml
+> INFO[0000] Create deploy/olm-catalog/_generated.concat_crd.yaml
+>
+> Congratulations!
+> Your Rook CSV 1.0.1 file for k8s is ready at: deploy/olm/deploy/olm-catalog/rook-ceph.v1.0.1.clusterserviceversion.yaml
+> Push it to https://github.com/operator-framework/community-operators as well as the CRDs files from deploy/olm/deploy/crds and the package file deploy/olm/assemble/rook-ceph.package.yaml.
+> ```
+
+For OpenShift, use `make CSV_VERSION=1.0.0 CSV_PLATFORM=ocp csv-ceph` instead.
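[Editor's note: spelled out the same way as the k8s example in the README above, the OpenShift invocation would look roughly like this; the version values are placeholders, not pinned releases.]

```console
make csv-ceph CSV_VERSION=1.0.1 CSV_PLATFORM=ocp ROOK_OP_VERSION=rook/ceph:v1.0.1
```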
diff --git a/cluster/olm/ceph/assemble/metadata-common.yaml b/deploy/olm/assemble/metadata-common.yaml
similarity index 100%
rename from cluster/olm/ceph/assemble/metadata-common.yaml
rename to deploy/olm/assemble/metadata-common.yaml
diff --git a/cluster/olm/ceph/assemble/metadata-k8s.yaml b/deploy/olm/assemble/metadata-k8s.yaml
similarity index 100%
rename from cluster/olm/ceph/assemble/metadata-k8s.yaml
rename to deploy/olm/assemble/metadata-k8s.yaml
diff --git a/cluster/olm/ceph/assemble/metadata-ocp.yaml b/deploy/olm/assemble/metadata-ocp.yaml
similarity index 100%
rename from cluster/olm/ceph/assemble/metadata-ocp.yaml
rename to deploy/olm/assemble/metadata-ocp.yaml
diff --git a/cluster/olm/ceph/assemble/metadata-okd.yaml b/deploy/olm/assemble/metadata-okd.yaml
similarity index 100%
rename from cluster/olm/ceph/assemble/metadata-okd.yaml
rename to deploy/olm/assemble/metadata-okd.yaml
diff --git a/cluster/olm/ceph/assemble/objectbucket.io_objectbucketclaims.yaml b/deploy/olm/assemble/objectbucket.io_objectbucketclaims.yaml
similarity index 100%
rename from cluster/olm/ceph/assemble/objectbucket.io_objectbucketclaims.yaml
rename to deploy/olm/assemble/objectbucket.io_objectbucketclaims.yaml
diff --git a/cluster/olm/ceph/assemble/objectbucket.io_objectbuckets.yaml b/deploy/olm/assemble/objectbucket.io_objectbuckets.yaml
similarity index 100%
rename from cluster/olm/ceph/assemble/objectbucket.io_objectbuckets.yaml
rename to deploy/olm/assemble/objectbucket.io_objectbuckets.yaml
diff --git a/cluster/olm/ceph/assemble/rook-ceph.package.yaml b/deploy/olm/assemble/rook-ceph.package.yaml
similarity index 100%
rename from cluster/olm/ceph/assemble/rook-ceph.package.yaml
rename to deploy/olm/assemble/rook-ceph.package.yaml
diff --git a/cluster/olm/ceph/generate-rook-csv-templates.sh b/deploy/olm/generate-rook-csv-templates.sh
similarity index 90%
rename from cluster/olm/ceph/generate-rook-csv-templates.sh
rename to deploy/olm/generate-rook-csv-templates.sh
index d6043cb2e4ec..9ff09b6a8591 100755
--- a/cluster/olm/ceph/generate-rook-csv-templates.sh
+++ b/deploy/olm/generate-rook-csv-templates.sh
@@ -12,7 +12,7 @@ if [ -f "Dockerfile" ]; then
     cd ../../
 fi
 
-: "${OLM_CATALOG_DIR:=cluster/olm/ceph}"
+: "${OLM_CATALOG_DIR:=deploy/olm}"
 DEPLOY_DIR="$OLM_CATALOG_DIR/deploy"
 CRDS_DIR="$DEPLOY_DIR/crds"
 
@@ -28,7 +28,7 @@ function generate_template() {
     rm -rf $csv_manifest_path
 
     # v9999.9999.9999 is just a placeholder. operator-sdk requires valid semver here.
-    (cluster/olm/ceph/generate-rook-csv.sh "9999.9999.9999" $provider "{{.RookOperatorImage}}")
+    (deploy/olm/generate-rook-csv.sh "9999.9999.9999" $provider "{{.RookOperatorImage}}")
     mv $tmp_csv_gen_file $csv_template_file
 
     # replace the placeholder with the templated value
diff --git a/cluster/olm/ceph/generate-rook-csv.sh b/deploy/olm/generate-rook-csv.sh
similarity index 97%
rename from cluster/olm/ceph/generate-rook-csv.sh
rename to deploy/olm/generate-rook-csv.sh
index 263d4399ce3d..82d6eea381a6 100755
--- a/cluster/olm/ceph/generate-rook-csv.sh
+++ b/deploy/olm/generate-rook-csv.sh
@@ -4,7 +4,7 @@ set -e
 ##################
 # INIT VARIABLES #
 ##################
-: "${OLM_CATALOG_DIR:=cluster/olm/ceph}"
+: "${OLM_CATALOG_DIR:=deploy/olm}"
 ASSEMBLE_FILE_COMMON="$OLM_CATALOG_DIR/assemble/metadata-common.yaml"
 ASSEMBLE_FILE_K8S="$OLM_CATALOG_DIR/assemble/metadata-k8s.yaml"
 ASSEMBLE_FILE_OCP="$OLM_CATALOG_DIR/assemble/metadata-ocp.yaml"
@@ -81,9 +81,9 @@ YQ_CMD_DELETE=($yq delete -i)
 YQ_CMD_MERGE_OVERWRITE=($yq merge --inplace --overwrite --prettyPrint)
 YQ_CMD_MERGE=($yq merge --inplace --append -P )
 YQ_CMD_WRITE=($yq write --inplace -P )
-OPERATOR_YAML_FILE_K8S="cluster/examples/kubernetes/ceph/operator.yaml"
-OPERATOR_YAML_FILE_OCP="cluster/examples/kubernetes/ceph/operator-openshift.yaml"
-COMMON_YAML_FILE="cluster/examples/kubernetes/ceph/common.yaml"
+OPERATOR_YAML_FILE_K8S="deploy/examples/operator.yaml"
+OPERATOR_YAML_FILE_OCP="deploy/examples/operator-openshift.yaml"
+COMMON_YAML_FILE="deploy/examples/common.yaml"
 CSV_PATH="$OLM_CATALOG_DIR/deploy/olm-catalog/${PLATFORM}/${VERSION}"
 CSV_BUNDLE_PATH="${CSV_PATH}/manifests"
 CSV_FILE_NAME="$CSV_BUNDLE_PATH/ceph.clusterserviceversion.yaml"
@@ -92,7 +92,7 @@ OLM_OPERATOR_YAML_FILE="$OLM_CATALOG_DIR/deploy/operator.yaml"
 OLM_ROLE_YAML_FILE="$OLM_CATALOG_DIR/deploy/role.yaml"
 OLM_ROLE_BINDING_YAML_FILE="$OLM_CATALOG_DIR/deploy/role_binding.yaml"
 OLM_SERVICE_ACCOUNT_YAML_FILE="$OLM_CATALOG_DIR/deploy/service_account.yaml"
-CEPH_EXTERNAL_SCRIPT_FILE="cluster/examples/kubernetes/ceph/create-external-cluster-resources.py"
+CEPH_EXTERNAL_SCRIPT_FILE="deploy/examples/create-external-cluster-resources.py"
 
 if [[ -d "$CSV_BUNDLE_PATH" ]]; then
     echo "$CSV_BUNDLE_PATH already exists, not doing anything."
@@ -115,6 +115,7 @@ function cleanup() {
 function generate_csv(){
     pushd "$OLM_CATALOG_DIR" &> /dev/null
     "${OP_SDK_CMD[@]}" "$VERSION"
+    mv "$CSV_BUNDLE_PATH/olm.clusterserviceversion.yaml" "$CSV_FILE_NAME"
     popd &> /dev/null
 
     # cleanup to get the expected state before merging the real data from assembles
diff --git a/design/ceph/ceph-nfs-ganesha.md b/design/ceph/ceph-nfs-ganesha.md
index 6619e3443514..1b6c39355477 100644
--- a/design/ceph/ceph-nfs-ganesha.md
+++ b/design/ceph/ceph-nfs-ganesha.md
@@ -40,13 +40,12 @@ This allows the NFS-Ganesha server cluster to be scalable and highly available.
   exported by the NFS-Ganesha server cluster. e.g.,
 
   ```
-  kubectl create -f cluster/examples/kubernetes/ceph/filesystem.yaml
+  kubectl create -f deploy/examples/filesystem.yaml
   ```
 
 - An existing RADOS pool (e.g., CephFS's data pool) or a pool created with a
   [Ceph Pool CRD] to store NFS client recovery data.
 
-
 ### Ceph NFS-Ganesha CRD
 
 The NFS-Ganesha CRD will specify the following:
diff --git a/design/ceph/filesystem.md b/design/ceph/filesystem.md
index 10f4cea6a2ca..57b405e64dd6 100644
--- a/design/ceph/filesystem.md
+++ b/design/ceph/filesystem.md
@@ -73,7 +73,7 @@ Multiple data pools can be configured for the file system. Assigning users or fi
 The metadata server settings correspond to the MDS service.
 - `activeCount`: The number of active MDS instances. As load increases, CephFS will automatically partition the file system across the MDS instances. Rook will create double the number of MDS instances as requested by the active count. The extra instances will be in standby mode for failover. If false, the extra MDS instances will all be on passive standby mode and will not maintain a warm cache of the metadata.
 - `activeStandby`: If true, the extra MDS instances will be in active standby mode and will keep a warm cache of the file system metadata for faster failover. The instances will be assigned by CephFS in failover pairs.
-- `placement`: The mds pods can be given standard Kubernetes placement restrictions with `nodeAffinity`, `tolerations`, `podAffinity`, `podAntiAffinity`, and `topologySpreadConstraints` similar to placement defined for daemons configured by the [cluster CRD](/cluster/examples/kubernetes/ceph/cluster.yaml).
+- `placement`: The mds pods can be given standard Kubernetes placement restrictions with `nodeAffinity`, `tolerations`, `podAffinity`, `podAntiAffinity`, and `topologySpreadConstraints` similar to placement defined for daemons configured by the [cluster CRD](/deploy/examples/cluster.yaml).
 
 ```yaml
 metadataServer:
diff --git a/design/ceph/object/store.md b/design/ceph/object/store.md
index bce028ff99b7..8bee7050b2a5 100644
--- a/design/ceph/object/store.md
+++ b/design/ceph/object/store.md
@@ -92,7 +92,7 @@ The gateway settings correspond to the RGW service.
 - `port`: The service port where the RGW service will be listening (http)
 - `securePort`: The service port where the RGW service will be listening (https)
 - `instances`: The number of RGW pods that will be started for this object store
-- `placement`: The rgw pods can be given standard Kubernetes placement restrictions with `nodeAffinity`, `tolerations`, `podAffinity`, `podAntiAffinity`, and `topologySpreadConstraints` similar to placement defined for daemons configured by the [cluster CRD](/cluster/examples/kubernetes/ceph/cluster.yaml).
+- `placement`: The rgw pods can be given standard Kubernetes placement restrictions with `nodeAffinity`, `tolerations`, `podAffinity`, `podAntiAffinity`, and `topologySpreadConstraints` similar to placement defined for daemons configured by the [cluster CRD](/deploy/examples/cluster.yaml).
 
 The RGW service can be configured to listen on both http and https by specifying both `port` and `securePort`.
 
diff --git a/design/common/object-bucket.md b/design/common/object-bucket.md
index db2bb118c497..0afcaddd80e5 100644
--- a/design/common/object-bucket.md
+++ b/design/common/object-bucket.md
@@ -2,9 +2,10 @@
 
 ## Overview
 
-An object store bucket is a container holding immutable objects. The Rook-Ceph [operator](https://github.com/yard-turkey/rook/blob/master/cluster/examples/kubernetes/ceph/operator.yaml) creates a controller which automates the provisioning of new and existing buckets.
+An object store bucket is a container holding immutable objects. The Rook-Ceph [operator](https://github.com/yard-turkey/rook/blob/master/deploy/examples/operator.yaml) creates a controller which automates the provisioning of new and existing buckets.
 
 A user requests bucket storage by creating an _ObjectBucketClaim_ (OBC). Upon detecting a new OBC, the Rook-Ceph bucket provisioner does the following:
+
 - creates a new bucket and grants user-level access (greenfield), or
 - grants user-level access to an existing bucket (brownfield), and
 - creates a Kubernetes Secret in the same namespace as the OBC
@@ -20,14 +21,14 @@ We welcome contributions! In the meantime, features that are not yet implemented
 
 - A Rook storage cluster must be configured and running in Kubernetes. In this example, it is assumed the cluster is in the `rook` namespace.
 - The following resources, or equivalent, need to be created:
-  - [crd](/cluster/examples/kubernetes/ceph/crds.yaml)
-  - [common](/cluster/examples/kubernetes/ceph/common.yaml)
-  - [operator](/cluster/examples/kubernetes/ceph/operator.yaml)
-  - [cluster](/cluster/examples/kubernetes/ceph/cluster-test.yaml)
-  - [object](/cluster/examples/kubernetes/ceph/object-test.yaml)
-  - [user](/cluster/examples/kubernetes/ceph/object-user.yaml)
-  - [storageclass](/cluster/examples/kubernetes/ceph/storageclass-bucket-retain.yaml)
-  - [claim](/cluster/examples/kubernetes/ceph/object-bucket-claim-retain.yaml)
+  - [crd](/deploy/examples/crds.yaml)
+  - [common](/deploy/examples/common.yaml)
+  - [operator](/deploy/examples/operator.yaml)
+  - [cluster](/deploy/examples/cluster-test.yaml)
+  - [object](/deploy/examples/object-test.yaml)
+  - [user](/deploy/examples/object-user.yaml)
+  - [storageclass](/deploy/examples/storageclass-bucket-retain.yaml)
+  - [claim](/deploy/examples/object-bucket-claim-retain.yaml)
 
 ## Object Store Bucket Walkthrough
 
@@ -114,7 +115,7 @@ The gateway settings correspond to the RGW service.
 - `securePort`: The service port where the RGW service will be listening (https)
 - `instances`: The number of RGW pods that will be started for this object store (ignored if allNodes=true)
 - `allNodes`: Whether all nodes in the cluster should run RGW as a daemonset
-- `placement`: The rgw pods can be given standard Kubernetes placement restrictions with `nodeAffinity`, `tolerations`, `podAffinity`, and `podAntiAffinity` similar to placement defined for daemons configured by the [cluster CRD](/cluster/examples/kubernetes/ceph/cluster.yaml).
+- `placement`: The rgw pods can be given standard Kubernetes placement restrictions with `nodeAffinity`, `tolerations`, `podAffinity`, and `podAntiAffinity` similar to placement defined for daemons configured by the [cluster CRD](/deploy/examples/cluster.yaml).
 
 The RGW service can be configured to listen on both http and https by specifying both `port` and `securePort`.
 
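[Editor's note: spelled out against the new layout, the prerequisite list from `design/common/object-bucket.md` above amounts to roughly this `kubectl` sequence; a sketch following that walkthrough's retain variants, not a tested script.]

```console
kubectl create -f deploy/examples/crds.yaml -f deploy/examples/common.yaml
kubectl create -f deploy/examples/operator.yaml
kubectl create -f deploy/examples/cluster-test.yaml
kubectl create -f deploy/examples/object-test.yaml
kubectl create -f deploy/examples/object-user.yaml
kubectl create -f deploy/examples/storageclass-bucket-retain.yaml
kubectl create -f deploy/examples/object-bucket-claim-retain.yaml
```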
diff --git a/images/ceph/Makefile b/images/ceph/Makefile
index 56683fe75e2e..bf1a2dfc3f52 100755
--- a/images/ceph/Makefile
+++ b/images/ceph/Makefile
@@ -29,7 +29,7 @@ OPERATOR_SDK_VERSION = v0.17.1
 # TODO: update to yq v4 - v3 end of life in Aug 2021 ; v4 removes the 'yq delete' cmd and changes syntax
 YQ_VERSION = 3.3.0
 GOHOST := GOOS=$(GOHOSTOS) GOARCH=$(GOHOSTARCH) go
-MANIFESTS_DIR=../../cluster/examples/kubernetes/ceph
+MANIFESTS_DIR=../../deploy/examples
 
 TEMP := $(shell mktemp -d)
 
@@ -80,12 +80,12 @@ do.build:
 	@cp -r $(MANIFESTS_DIR)/monitoring $(TEMP)/ceph-monitoring
 	@mkdir -p $(TEMP)/rook-external/test-data
 	@cp $(MANIFESTS_DIR)/create-external-cluster-resources.* $(TEMP)/rook-external/
-	@cp $(MANIFESTS_DIR)/test-data/ceph-status-out $(TEMP)/rook-external/test-data/
+	@cp ../../tests/ceph-status-out $(TEMP)/rook-external/test-data/
 ifeq ($(INCLUDE_CSV_TEMPLATES),true)
 	@$(MAKE) CSV_TEMPLATE_DIR=$(TEMP) generate-csv-templates
-	@$(MAKE) CRD_TEMPLATE_DIR=$(TEMP)/cluster/olm/ceph/templates/crds/ get-volume-replication-crds
-	@cp -r $(TEMP)/cluster/olm/ceph/templates $(TEMP)/ceph-csv-templates
+	@$(MAKE) CRD_TEMPLATE_DIR=$(TEMP)/deploy/olm/templates/crds/ get-volume-replication-crds
+	@cp -r $(TEMP)/deploy/olm/templates $(TEMP)/ceph-csv-templates
 else
 	mkdir $(TEMP)/ceph-csv-templates
 endif
@@ -107,19 +107,19 @@ generate-csv-templates: $(OPERATOR_SDK) $(YQ) ## Generate CSV templates for OLM
 	@# then, generate or copy all prerequisites into CSV_TEMPLATE_DIR (e.g., CRDs)
 	@# finally, generate the templates in-place using CSV_TEMPLATE_DIR as a staging dir
 	@mkdir -p $(CSV_TEMPLATE_DIR)
-	@cp -a ../../cluster $(CSV_TEMPLATE_DIR)/cluster
+	@cp -a ../../deploy $(CSV_TEMPLATE_DIR)/deploy
 	@set -eE;\
 	BEFORE_GEN_CRD_SIZE=$$(wc -l < $(MANIFESTS_DIR)/crds.yaml);\
 	$(MAKE) -C ../.. NO_OB_OBC_VOL_GEN=true MAX_DESC_LEN=0 BUILD_CRDS_INTO_DIR=$(CSV_TEMPLATE_DIR) crds;\
-	AFTER_GEN_CRD_SIZE=$$(wc -l < $(CSV_TEMPLATE_DIR)/cluster/examples/kubernetes/ceph/crds.yaml);\
+	AFTER_GEN_CRD_SIZE=$$(wc -l < $(CSV_TEMPLATE_DIR)/deploy/examples/crds.yaml);\
 	if [ "$$BEFORE_GEN_CRD_SIZE" -le "$$AFTER_GEN_CRD_SIZE" ]; then\
 		echo "the new crd file must be smaller since the description fields were stripped!";\
 		echo "length before $$BEFORE_GEN_CRD_SIZE";\
 		echo "length after $$AFTER_GEN_CRD_SIZE";\
 		exit 1;\
 	fi
-	@OLM_CATALOG_DIR=$(CSV_TEMPLATE_DIR)/cluster/olm/ceph ../../cluster/olm/ceph/generate-rook-csv-templates.sh
-	@echo " === Generated CSV templates can be found at $(CSV_TEMPLATE_DIR)/cluster/olm/ceph/templates"
+	@OLM_CATALOG_DIR=$(CSV_TEMPLATE_DIR)/deploy/olm ../../deploy/olm/generate-rook-csv-templates.sh
+	@echo " === Generated CSV templates can be found at $(CSV_TEMPLATE_DIR)/deploy/olm/templates"
 
 get-volume-replication-crds:
 	@if [[ -z "$(CRD_TEMPLATE_DIR)" ]]; then echo "CRD_TEMPLATE_DIR is not set"; exit 1; fi
@@ -146,10 +146,10 @@ $(OPERATOR_SDK):
 
 csv: $(OPERATOR_SDK) $(YQ) ## Generate a CSV file for OLM.
 	@echo Generating CSV manifests
-	@cd ../.. && cluster/olm/ceph/generate-rook-csv.sh $(CSV_VERSION) $(CSV_PLATFORM) $(ROOK_OP_VERSION)
+	@cd ../.. && deploy/olm/generate-rook-csv.sh $(CSV_VERSION) $(CSV_PLATFORM) $(ROOK_OP_VERSION)
 
 csv-clean: $(OPERATOR_SDK) $(YQ) ## Remove existing OLM files.
-	@rm -fr ../../cluster/olm/ceph/deploy/* ../../cluster/olm/ceph/templates/*
+	@rm -fr ../../deploy/olm/deploy/* ../../deploy/olm/templates/*
 
 # reading from a file and outputting to the same file can have undefined results, so use this intermediate
 IMAGE_TMP="/tmp/rook-ceph-image-list"
diff --git a/pkg/operator/k8sutil/prometheus_test.go b/pkg/operator/k8sutil/prometheus_test.go
index 5910bb5a6081..48fc2cb6941d 100644
--- a/pkg/operator/k8sutil/prometheus_test.go
+++ b/pkg/operator/k8sutil/prometheus_test.go
@@ -27,7 +27,7 @@ import (
 
 func TestGetServiceMonitor(t *testing.T) {
 	projectRoot := util.PathToProjectRoot()
-	filePath := path.Join(projectRoot, "/cluster/examples/kubernetes/ceph/monitoring/service-monitor.yaml")
+	filePath := path.Join(projectRoot, "/deploy/examples/monitoring/service-monitor.yaml")
 	servicemonitor, err := GetServiceMonitor(filePath)
 	assert.Nil(t, err)
 	assert.Equal(t, "rook-ceph-mgr", servicemonitor.GetName())
@@ -39,7 +39,7 @@ func TestGetServiceMonitor(t *testing.T) {
 
 func TestGetPrometheusRule(t *testing.T) {
 	projectRoot := util.PathToProjectRoot()
-	filePath := path.Join(projectRoot, "/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v14-rules.yaml")
+	filePath := path.Join(projectRoot, "/deploy/examples/monitoring/prometheus-ceph-v14-rules.yaml")
 	rules, err := GetPrometheusRule(filePath)
 	assert.Nil(t, err)
 	assert.Equal(t, "prometheus-ceph-rules", rules.GetName())
diff --git a/cluster/examples/kubernetes/ceph/test-data/ceph-status-out b/tests/ceph-status-out
similarity index 100%
rename from cluster/examples/kubernetes/ceph/test-data/ceph-status-out
rename to tests/ceph-status-out
diff --git a/tests/framework/installer/ceph_settings.go b/tests/framework/installer/ceph_settings.go
index 8d4a2daae25d..c93bb4c03661 100644
--- a/tests/framework/installer/ceph_settings.go
+++ b/tests/framework/installer/ceph_settings.go
@@ -62,7 +62,7 @@ func (s *TestCephSettings) ApplyEnvVars() {
 }
 
 func (s *TestCephSettings) readManifest(filename string) string {
-	manifest := readManifest("ceph", filename)
+	manifest := readManifest(filename)
 	return replaceNamespaces(manifest, manifest, s.OperatorNamespace, s.Namespace)
 }
 
@@ -71,7 +71,7 @@ func (s *TestCephSettings) readManifestFromGithub(filename string) string {
 }
 
 func (s *TestCephSettings) readManifestFromGithubWithClusterNamespace(filename, clusterNamespace string) string {
-	manifest := readManifestFromGithub(s.RookVersion, "ceph", filename)
+	manifest := readManifestFromGithub(s.RookVersion, filename)
 	return replaceNamespaces(filename, manifest, s.OperatorNamespace, clusterNamespace)
 }
 
diff --git a/tests/framework/installer/settings.go b/tests/framework/installer/settings.go
index 4b268b2fbf09..9b9e43060db6 100644
--- a/tests/framework/installer/settings.go
+++ b/tests/framework/installer/settings.go
@@ -1,12 +1,9 @@
 /*
 Copyright 2021 The Rook Authors. All rights reserved.
-
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
-
 http://www.apache.org/licenses/LICENSE-2.0
-
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -30,12 +27,12 @@ import (
 
 var imageMatch = regexp.MustCompile(`image: rook\/ceph:[a-z0-9.-]+`)
 
-func readManifest(provider, filename string) string {
+func readManifest(filename string) string {
 	rootDir, err := utils.FindRookRoot()
 	if err != nil {
 		panic(err)
 	}
-	manifest := path.Join(rootDir, "cluster/examples/kubernetes/", provider, filename)
+	manifest := path.Join(rootDir, "deploy/examples/", filename)
 	logger.Infof("Reading manifest: %s", manifest)
 	contents, err := ioutil.ReadFile(manifest)
 	if err != nil {
@@ -44,8 +41,16 @@ func readManifest(provider, filename string) string {
 	return imageMatch.ReplaceAllString(string(contents), "image: rook/ceph:"+LocalBuildTag)
 }
 
-func readManifestFromGithub(rookVersion, provider, filename string) string {
-	url := fmt.Sprintf("https://raw.githubusercontent.com/rook/rook/%s/cluster/examples/kubernetes/%s/%s", rookVersion, provider, filename)
+func buildURL(rookVersion, filename string) string {
+	// releases from before the layout move (v1.6.x / v1.7.x) still ship manifests under the old tree
+	re := regexp.MustCompile(`^v1\.[6-7]\.[0-9]{1,2}$`)
+	if re.MatchString(rookVersion) {
+		return fmt.Sprintf("%s/cluster/examples/kubernetes/ceph/%s", rookVersion, filename)
+	}
+	return fmt.Sprintf("%s/deploy/examples/%s", rookVersion, filename)
+}
+
+func readManifestFromGithub(rookVersion, filename string) string {
+	url := fmt.Sprintf("https://raw.githubusercontent.com/rook/rook/%s", buildURL(rookVersion, filename))
 	return readManifestFromURL(url)
 }
diff --git a/tests/framework/utils/helm_helper.go b/tests/framework/utils/helm_helper.go
index c9163af61ea7..b28264ef7be5 100644
--- a/tests/framework/utils/helm_helper.go
+++ b/tests/framework/utils/helm_helper.go
@@ -18,11 +18,12 @@ package utils
 
 import (
 	"fmt"
-	"gopkg.in/yaml.v2"
 	"os"
 	"path"
 	"path/filepath"
 
+	"gopkg.in/yaml.v2"
+
 	"github.com/pkg/errors"
 	"github.com/rook/rook/pkg/util/exec"
 )
@@ -86,7 +87,7 @@ func (h *HelmHelper) InstallLocalRookHelmChart(namespace, chart string, values m
 	if err != nil {
 		return errors.Wrap(err, "failed to find rook root")
 	}
-	chartDir := path.Join(rootDir, fmt.Sprintf("cluster/charts/%s/", chart))
+	chartDir := path.Join(rootDir, fmt.Sprintf("deploy/charts/%s/", chart))
 	cmdArgs := []string{"install", "--create-namespace", chart, chartDir}
 	if namespace != "" {
 		cmdArgs = append(cmdArgs, "--namespace", namespace)
diff --git a/tests/scripts/auto-grow-storage.sh b/tests/scripts/auto-grow-storage.sh
index 15a6a264970b..51c10e23212a 100755
--- a/tests/scripts/auto-grow-storage.sh
+++ b/tests/scripts/auto-grow-storage.sh
@@ -7,7 +7,7 @@ function calculateSize() {
     local currentsize=$2
     local unit=$1
 
-    rawsizeValue=0 # rawsizeValue is a global variable 
+    rawsizeValue=0 # rawsizeValue is a global variable
 
     if [[ "$currentsize" == *"Mi" ]]
     then
@@ -42,7 +42,7 @@ function compareSizes() {
     then
         return "1"
     fi
-    return "0" 
+    return "0"
 }
 
 function growVertically() {
@@ -53,7 +53,7 @@ function growVertically() {
     local currentSize
     currentSize=$(kubectl get pvc "${pvc}" -n "${ns}" -o json | jq -r '.spec.resources.requests.storage')
     echo "PVC(OSD) current size is ${currentSize} and will be increased by ${growRate}%."
-    
+
     calculateSize "${pvc}" "${currentSize}" # rawSize is calculated and used for further process
 
     if ! [[ "${rawSize}" =~ ^[0-9]+$ ]]
@@ -68,7 +68,7 @@ function growVertically() {
     else
        echo "New calculated size for the PVC is ${newSize}${unitSize}"
     fi
-    
+
     compareSizes ${newSize}${unitSize} "${maxSize}"
     if [ "1" = $? ]
     then
@@ -79,14 +79,14 @@ function growVertically() {
         result=$(kubectl patch pvc "${pvc}" -n "${ns}" --type json --patch "[{ op: replace, path: /spec/resources/requests/storage, value: ${newSize}${unitSize} }]")
     fi
     echo "${result}"
-    fi 
+    fi
 }
 
 function growHorizontally() {
     local increaseOSDCount=$1
     local pvc=$2
     local ns=$3
-    local maxOSDCount=$4 
+    local maxOSDCount=$4
     local deviceSetName
     local cluster=""
     local deviceSet=""
@@ -116,7 +116,7 @@ function growHorizontally() {
             echo "${result}"
             break
         fi
-        deviceSetCount=$((deviceSetCount+1)) 
+        deviceSetCount=$((deviceSetCount+1))
         deviceSet=$(kubectl get CephCluster -n "${ns}" -o json | jq -r ".items[${clusterCount}].spec.storage.storageClassDeviceSets[${deviceSetCount}].name")
     done
     clusterCount=$((clusterCount+1))
@@ -126,7 +126,7 @@ function growHorizontally() {
 
 function growOSD(){
     itr=0
-    alertmanagerroute=$(kubectl -n rook-ceph -o jsonpath="{.status.hostIP}" get pod prometheus-rook-prometheus-0) 
+    alertmanagerroute=$(kubectl -n rook-ceph -o jsonpath="{.status.hostIP}" get pod prometheus-rook-prometheus-0)
     route=${alertmanagerroute}:30900
     toolbox=$(kubectl get pods -n rook-ceph | grep -i rook-ceph-tools | awk '{ print $1 }')
     alerts=$(kubectl exec -it "${toolbox}" -n rook-ceph -- bash -c "curl -s http://${route}/api/v1/alerts")
@@ -153,17 +153,17 @@ function growOSD(){
         pvc=$(kubectl get deployment -n "${ns}" rook-ceph-"${osdID}" -o json | jq -r '.metadata.labels."ceph.rook.io/pvc"')
         if [[ $pvc == null ]]
         then
-            echo "PVC not found, script can only run on PVC-based cluster" 
+            echo "PVC not found, script can only run on PVC-based cluster"
             exit 1
         fi
         echo "Processing NearFull or Full alert for PVC ${pvc} in namespace ${ns}"
         if [[ $1 == "count" ]]
-        then 
+        then
            growHorizontally "$2" "${pvc}" "${ns}" "$3"
        else
           growVertically "$2" "${pvc}" "${ns}" "$3"
        fi
-    fi 
+    fi
     (( itr = itr + 1 ))
     if [[ "${itr}" == "${total_alerts}" ]] || [[ "${total_alerts}" == "0" ]]
     then
@@ -172,24 +172,24 @@ function growOSD(){
         total_alerts=$( jq '.data.alerts | length' <<< "${alerts}")
         itr=0
         echo "Looping at $(date +"%Y-%m-%d %H:%M:%S")"
-    fi 
+    fi
     done
 }
 
 function creatingPrerequisites(){
     echo "creating Prerequisites deployments - Prometheus Operator and Prometheus Instances"
-    # creating Prometheus operator 
+    # creating Prometheus operator
     kubectl apply -f https://raw.githubusercontent.com/coreos/prometheus-operator/v0.40.0/bundle.yaml
     # waitng for Prometheus operator to get ready
     timeout 30 sh -c "until [ $(kubectl get pod -l app.kubernetes.'io/name'=prometheus-operator -o json | jq -r '.items[0].status.phase') = Running ]; do echo 'waiting for prometheus-operator to get created' && sleep 1; done"
     # creating a service monitor that will watch the Rook cluster and collect metrics regularly
-    kubectl create -f https://raw.githubusercontent.com/rook/rook/master/cluster/examples/kubernetes/ceph/monitoring/service-monitor.yaml
+    kubectl create -f https://raw.githubusercontent.com/rook/rook/master/deploy/examples/monitoring/service-monitor.yaml
     # create the PrometheusRule for Rook alerts.
-    kubectl create -f https://raw.githubusercontent.com/rook/rook/master/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v14-rules.yaml
+    kubectl create -f https://raw.githubusercontent.com/rook/rook/master/deploy/examples/monitoring/prometheus-ceph-v14-rules.yaml
     # create prometheus-rook-prometheus-0 pod
-    kubectl create -f https://raw.githubusercontent.com/rook/rook/master/cluster/examples/kubernetes/ceph/monitoring/prometheus.yaml
+    kubectl create -f https://raw.githubusercontent.com/rook/rook/master/deploy/examples/monitoring/prometheus.yaml
     # create prometheus-service
-    kubectl create -f https://raw.githubusercontent.com/rook/rook/master/cluster/examples/kubernetes/ceph/monitoring/prometheus-service.yaml
+    kubectl create -f https://raw.githubusercontent.com/rook/rook/master/deploy/examples/monitoring/prometheus-service.yaml
     # waitng for prometheus-rook-prometheus-0 pod to get ready
     timeout 60 sh -c "until [ $(kubectl get pod -l prometheus=rook-prometheus -nrook-ceph -o json | jq -r '.items[0].status.phase') = Running ]; do echo 'waiting for prometheus-rook-prometheus-0 pod to get created' && sleep 1; done"
     if [ "$(kubectl get pod -l prometheus=rook-prometheus -nrook-ceph)" == "" ]
@@ -221,14 +221,14 @@ count)
     then
         echo "maxCount should be an integer"
         invalidCall
-        exit 1 
+        exit 1
     fi
 
     if ! [[ "${count}" =~ ^[0-9]+$ ]]
     then
        echo "rate should be an integer"
        invalidCall
-        exit 1 
-    fi 
+        exit 1
+    fi
 
     creatingPrerequisites
     echo "Adding on nearfull and full alert and number of OSD to add is ${count}"
     growOSD count "${count}" "${max}"
@@ -245,14 +245,14 @@ size)
     then
         echo "maxSize should be an string"
        invalidCall
-        exit 1 
+        exit 1
     fi
 
     if ! [[ "${growRate}" =~ ^[0-9]+$ ]]
     then
        echo "growth-rate should be an integer"
        invalidCall
-        exit 1 
-    fi 
+        exit 1
+    fi
 
     creatingPrerequisites
     echo "Resizing on nearfull and full alert and Expansion percentage set to ${growRate}%"
     growOSD size "${growRate}" "${max}"
@@ -260,4 +260,4 @@ size)
 *)
     invalidCall
     ;;
-esac 
+esac
diff --git a/tests/scripts/github-action-helper.sh b/tests/scripts/github-action-helper.sh
index 6a70c0b3b850..25e8ede57fab 100755
--- a/tests/scripts/github-action-helper.sh
+++ b/tests/scripts/github-action-helper.sh
@@ -162,9 +162,7 @@ function build_rook_all() {
 }
 
 function validate_yaml() {
-  cd cluster/examples/kubernetes/ceph
-
-  # create the Rook CRDs and other resources
+  cd deploy/examples
   kubectl create -f crds.yaml -f common.yaml
 
   # create the volume replication CRDs
@@ -187,7 +185,7 @@ function validate_yaml() {
 
 function create_cluster_prerequisites() {
   # this might be called from another function that has already done a cd
-  ( cd cluster/examples/kubernetes/ceph && kubectl create -f crds.yaml -f common.yaml )
+  ( cd deploy/examples && kubectl create -f crds.yaml -f common.yaml )
 }
 
 function deploy_manifest_with_local_build() {
@@ -208,7 +206,7 @@ function replace_ceph_image() {
 }
 
 function deploy_cluster() {
-  cd cluster/examples/kubernetes/ceph
+  cd deploy/examples
   deploy_manifest_with_local_build operator.yaml
   sed -i "s|#deviceFilter:|deviceFilter: ${BLOCK/\/dev\/}|g" cluster-test.yaml
   kubectl create -f cluster-test.yaml
@@ -272,14 +270,15 @@ function create_LV_on_disk() {
   sudo vgcreate "$VG" "$BLOCK" || sudo vgcreate "$VG" "$BLOCK" || sudo vgcreate "$VG" "$BLOCK"
   sudo lvcreate -l 100%FREE -n "${LV}" "${VG}"
   tests/scripts/localPathPV.sh /dev/"${VG}"/${LV}
-  kubectl create -f cluster/examples/kubernetes/ceph/crds.yaml
-  kubectl create -f cluster/examples/kubernetes/ceph/common.yaml
+  kubectl create -f deploy/examples/crds.yaml
+  kubectl create -f deploy/examples/common.yaml
 }
 
 function deploy_first_rook_cluster() {
   BLOCK=$(sudo lsblk|awk '/14G/ {print $1}'| head -1)
   create_cluster_prerequisites
-  cd cluster/examples/kubernetes/ceph/
+  cd deploy/examples/
+
   deploy_manifest_with_local_build operator.yaml
   yq w -i -d1 cluster-test.yaml spec.dashboard.enabled false
   yq w -i -d1 cluster-test.yaml spec.storage.useAllDevices false
@@ -290,7 +289,7 @@ function deploy_first_rook_cluster() {
 
 function deploy_second_rook_cluster() {
   BLOCK=$(sudo lsblk|awk '/14G/ {print $1}'| head -1)
-  cd cluster/examples/kubernetes/ceph/
+  cd deploy/examples/
   NAMESPACE=rook-ceph-secondary envsubst < common-second-cluster.yaml | kubectl create -f -
   sed -i 's/namespace: rook-ceph/namespace: rook-ceph-secondary/g' cluster-test.yaml
   yq w -i -d1 cluster-test.yaml spec.storage.deviceFilter "${BLOCK}"2
@@ -348,7 +347,7 @@ function restart_operator () {
 }
 
 function write_object_to_cluster1_read_from_cluster2() {
-  cd cluster/examples/kubernetes/ceph/
+  cd deploy/examples/
   echo "[default]" > s3cfg
   echo "host_bucket = no.way.in.hell" >> ./s3cfg
   echo "use_https = False" >> ./s3cfg
diff --git a/tests/scripts/multi-node/build-rook.sh b/tests/scripts/multi-node/build-rook.sh
index b238e060e556..3dc475681a65 100755
--- a/tests/scripts/multi-node/build-rook.sh
+++ b/tests/scripts/multi-node/build-rook.sh
@@ -7,7 +7,7 @@ set -e
 #############
 
 rook_git_root=$(git rev-parse --show-toplevel)
-rook_kube_templates_dir="$rook_git_root/cluster/examples/kubernetes/ceph/"
+rook_kube_templates_dir="$rook_git_root/deploy/examples/"
 
 #############
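[Editor's note: the `buildURL` helper patched into `tests/framework/installer/settings.go` above keeps upgrade tests working across the move: v1.6.x/v1.7.x tags resolve to the old `cluster/examples/kubernetes/ceph` tree, everything else to `deploy/examples`. A quick shell sanity check of both layouts; the `v1.7.2` tag is only an example.]

```console
curl -fsSL -o /dev/null https://raw.githubusercontent.com/rook/rook/v1.7.2/cluster/examples/kubernetes/ceph/operator.yaml && echo "old layout OK"
curl -fsSL -o /dev/null https://raw.githubusercontent.com/rook/rook/master/deploy/examples/operator.yaml && echo "new layout OK"
```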