diff --git a/.commitlintrc.json b/.commitlintrc.json index 2aef88f0584f..4af9f9226865 100644 --- a/.commitlintrc.json +++ b/.commitlintrc.json @@ -7,14 +7,28 @@ 2, "always", [ - "build", + "block", "bot", - "cassandra", - "ceph", + "build", + "cephfs-mirror", "ci", "core", + "csi", "docs", + "file", + "helm", + "mds", + "mgr", + "mon", + "monitoring", "nfs", + "object", + "operator", + "osd", + "pool", + "rbd-mirror", + "rgw", + "security", "test" ] ], diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 6000547bcb86..74f17a2ac15d 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -13,6 +13,8 @@ jobs: steps: - name: checkout uses: actions/checkout@v2 + with: + fetch-depth: 0 - uses: actions/setup-go@v2 with: @@ -24,8 +26,11 @@ jobs: - name: build rook working-directory: /Users/runner/go/src/github.com/rook/rook run: | - # set VERSION to a dummy value since Jenkins normally sets it for us. Do this to make Helm happy and not fail with "Error: Invalid Semantic Version" - GOPATH=$(go env GOPATH) make clean && make -j$nproc IMAGES='ceph' VERSION=0 BUILD_CONTAINER_IMAGE=false build + GOPATH=$(go env GOPATH) make clean && make -j$nproc IMAGES='ceph' BUILD_CONTAINER_IMAGE=false build + + - name: validate build + working-directory: /Users/runner/go/src/github.com/rook/rook + run: tests/scripts/validate_modified_files.sh build - name: run codegen working-directory: /Users/runner/go/src/github.com/rook/rook @@ -59,6 +64,8 @@ jobs: steps: - name: checkout uses: actions/checkout@v2 + with: + fetch-depth: 0 - name: setup golang uses: actions/setup-go@v2 diff --git a/.github/workflows/canary-integration-test-arm64.yml b/.github/workflows/canary-integration-test-arm64.yml index 4d0dcd674afb..d9c79c6ccdd3 100644 --- a/.github/workflows/canary-integration-test-arm64.yml +++ b/.github/workflows/canary-integration-test-arm64.yml @@ -18,6 +18,8 @@ jobs: steps: - name: checkout uses: actions/checkout@v2 + with: + fetch-depth: 0 - name: setup golang uses: actions/setup-go@v2 diff --git a/.github/workflows/canary-integration-test.yml b/.github/workflows/canary-integration-test.yml index 41d9804c84e3..d2eba9c01201 100644 --- a/.github/workflows/canary-integration-test.yml +++ b/.github/workflows/canary-integration-test.yml @@ -23,6 +23,8 @@ jobs: steps: - name: checkout uses: actions/checkout@v2 + with: + fetch-depth: 0 - name: setup golang uses: actions/setup-go@v2 @@ -55,7 +57,7 @@ jobs: run: tests/scripts/github-action-helper.sh deploy_cluster - name: wait for prepare pod - run: timeout 300 sh -c 'until kubectl -n rook-ceph logs -f $(kubectl -n rook-ceph get pod -l app=rook-ceph-osd-prepare -o jsonpath='{.items[*].metadata.name}'); do sleep 5; done' + run: tests/scripts/github-action-helper.sh wait_for_prepare_pod - name: wait for ceph to be ready run: tests/scripts/github-action-helper.sh wait_for_ceph_to_be_ready all 2 @@ -66,7 +68,7 @@ jobs: timeout 15 sh -c "until kubectl -n rook-ceph exec $toolbox -- ceph mgr dump -f json|jq --raw-output .active_addr|grep -Eosq \"(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\" ; do sleep 1 && echo 'waiting for the manager IP to be available'; done" mgr_raw=$(kubectl -n rook-ceph exec $toolbox -- ceph mgr dump -f json|jq --raw-output .active_addr) timeout 60 sh -c "until kubectl -n rook-ceph exec $toolbox -- curl --silent --show-error ${mgr_raw%%:*}:9283; do echo 'waiting for mgr prometheus exporter to be ready' 
&& sleep 1; done" - kubectl -n rook-ceph exec $toolbox -- /bin/bash -c "echo \"$(kubectl get pods -o wide -n rook-ceph -l app=rook-ceph-mgr --no-headers=true|head -n1|awk '{print $6"\t"$1}')\" >>/etc/hosts" + kubectl -n rook-ceph exec $toolbox -- /bin/bash -c "echo \"$(kubectl get pods -o wide -n rook-ceph -l app=rook-ceph-mgr --no-headers=true|awk 'FNR <= 1'|awk '{print $6"\t"$1}')\" >>/etc/hosts" kubectl -n rook-ceph exec $toolbox -- mkdir -p /etc/ceph/test-data kubectl -n rook-ceph cp cluster/examples/kubernetes/ceph/test-data/ceph-status-out $toolbox:/etc/ceph/test-data/ kubectl -n rook-ceph cp cluster/examples/kubernetes/ceph/create-external-cluster-resources.py $toolbox:/etc/ceph @@ -82,6 +84,11 @@ jobs: - name: check-ownerreferences run: tests/scripts/github-action-helper.sh check_ownerreferences + - name: collect common logs + if: always() + run: | + tests/scripts/collect-logs.sh + - name: Upload canary test result uses: actions/upload-artifact@v2 if: always() @@ -100,6 +107,8 @@ jobs: steps: - name: checkout uses: actions/checkout@v2 + with: + fetch-depth: 0 - name: setup golang uses: actions/setup-go@v2 @@ -136,23 +145,15 @@ jobs: - name: deploy cluster run: | - kubectl create -f cluster/examples/kubernetes/ceph/operator.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].encrypted" false yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].count" 2 yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].volumeClaimTemplates[0].spec.resources.requests.storage" 6Gi kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml - kubectl create -f cluster/examples/kubernetes/ceph/toolbox.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml - name: wait for prepare pod - run: | - timeout 180 sh -c '[ $(kubectl -n rook-ceph get pod -l app=rook-ceph-osd-prepare -o jsonpath='{.items[*].metadata.name}'|wc -l) -eq 2 ]; do sleep 5; done'||true - for prepare in $(kubectl -n rook-ceph get pod -l app=rook-ceph-osd-prepare -o jsonpath='{.items[*].metadata.name}'); do - kubectl -n rook-ceph logs -f $prepare - break - done - timeout 60 sh -c 'until kubectl -n rook-ceph logs $(kubectl -n rook-ceph get pod -l app=rook-ceph-osd,ceph_daemon_id=0 -o jsonpath='{.items[*].metadata.name}') --all-containers; do echo "waiting for osd container" && sleep 1; done'||true - kubectl -n rook-ceph describe job/$prepare||true - kubectl -n rook-ceph describe deploy/rook-ceph-osd-0||true + run: tests/scripts/github-action-helper.sh wait_for_prepare_pod - name: wait for ceph to be ready run: tests/scripts/github-action-helper.sh wait_for_ceph_to_be_ready osd 2 @@ -160,6 +161,11 @@ jobs: - name: check-ownerreferences run: tests/scripts/github-action-helper.sh check_ownerreferences + - name: collect common logs + if: always() + run: | + tests/scripts/collect-logs.sh + - name: Upload pvc test result uses: actions/upload-artifact@v2 if: always() @@ -178,6 +184,8 @@ jobs: steps: - name: checkout uses: actions/checkout@v2 + with: + fetch-depth: 0 - name: setup golang uses: actions/setup-go@v2 @@ -213,11 +221,11 @@ jobs: - name: deploy cluster run: | - kubectl create -f cluster/examples/kubernetes/ceph/operator.yaml + tests/scripts/github-action-helper.sh 
deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].encrypted" false cat tests/manifests/test-on-pvc-db.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml - kubectl create -f cluster/examples/kubernetes/ceph/toolbox.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml - name: wait for prepare pod run: tests/scripts/github-action-helper.sh wait_for_prepare_pod @@ -225,6 +233,11 @@ jobs: - name: wait for ceph to be ready run: tests/scripts/github-action-helper.sh wait_for_ceph_to_be_ready osd 1 + - name: collect common + if: always() + run: | + tests/scripts/collect-logs.sh + - name: Upload pvc-db test result uses: actions/upload-artifact@v2 if: always() @@ -243,6 +256,8 @@ jobs: steps: - name: checkout uses: actions/checkout@v2 + with: + fetch-depth: 0 - name: setup golang uses: actions/setup-go@v2 @@ -279,12 +294,12 @@ jobs: - name: deploy rook run: | - kubectl create -f cluster/examples/kubernetes/ceph/operator.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].encrypted" false cat tests/manifests/test-on-pvc-db.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml cat tests/manifests/test-on-pvc-wal.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml - kubectl create -f cluster/examples/kubernetes/ceph/toolbox.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml - name: wait for prepare pod run: tests/scripts/github-action-helper.sh wait_for_prepare_pod @@ -294,6 +309,11 @@ jobs: tests/scripts/github-action-helper.sh wait_for_ceph_to_be_ready osd 1 kubectl -n rook-ceph get pods + - name: collect common logs + if: always() + run: | + tests/scripts/collect-logs.sh + - name: Upload pvc-db-wal test result uses: actions/upload-artifact@v2 if: always() @@ -312,6 +332,8 @@ jobs: steps: - name: checkout uses: actions/checkout@v2 + with: + fetch-depth: 0 - name: setup golang uses: actions/setup-go@v2 @@ -347,11 +369,11 @@ jobs: - name: deploy cluster run: | - kubectl create -f cluster/examples/kubernetes/ceph/operator.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].count" 2 yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].volumeClaimTemplates[0].spec.resources.requests.storage" 6Gi kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml - kubectl create -f cluster/examples/kubernetes/ceph/toolbox.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml - name: wait for prepare pod run: tests/scripts/github-action-helper.sh wait_for_prepare_pod @@ -362,6 +384,11 @@ jobs: kubectl -n rook-ceph get secrets sudo lsblk + - name: collect common logs + if: always() + run: | + tests/scripts/collect-logs.sh + - name: Upload encryption-pvc test result uses: actions/upload-artifact@v2 if: always() @@ 
-380,6 +407,8 @@ jobs: steps: - name: checkout uses: actions/checkout@v2 + with: + fetch-depth: 0 - name: setup golang uses: actions/setup-go@v2 @@ -416,10 +445,10 @@ jobs: - name: deploy cluster run: | - kubectl create -f cluster/examples/kubernetes/ceph/operator.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml cat tests/manifests/test-on-pvc-db.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml - kubectl create -f cluster/examples/kubernetes/ceph/toolbox.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml - name: wait for prepare pod run: tests/scripts/github-action-helper.sh wait_for_prepare_pod @@ -430,6 +459,11 @@ jobs: kubectl -n rook-ceph get pods kubectl -n rook-ceph get secrets + - name: collect common logs + if: always() + run: | + tests/scripts/collect-logs.sh + - name: Upload encryption-pvc-db-wal test result uses: actions/upload-artifact@v2 if: always() @@ -448,6 +482,8 @@ jobs: steps: - name: checkout uses: actions/checkout@v2 + with: + fetch-depth: 0 - name: setup golang uses: actions/setup-go@v2 @@ -484,11 +520,11 @@ jobs: - name: deploy rook run: | - kubectl create -f cluster/examples/kubernetes/ceph/operator.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml cat tests/manifests/test-on-pvc-db.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml cat tests/manifests/test-on-pvc-wal.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml - kubectl create -f cluster/examples/kubernetes/ceph/toolbox.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml - name: wait for prepare pod run: tests/scripts/github-action-helper.sh wait_for_prepare_pod @@ -499,6 +535,11 @@ jobs: kubectl -n rook-ceph get pods kubectl -n rook-ceph get secrets + - name: collect common logs + if: always() + run: | + tests/scripts/collect-logs.sh + - name: Upload encryption-pvc-db test result uses: actions/upload-artifact@v2 if: always() @@ -518,6 +559,8 @@ jobs: steps: - name: checkout uses: actions/checkout@v2 + with: + fetch-depth: 0 - name: setup golang uses: actions/setup-go@v2 @@ -556,7 +599,7 @@ jobs: - name: deploy cluster run: | - kubectl create -f cluster/examples/kubernetes/ceph/operator.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml cat tests/manifests/test-kms-vault.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml yq merge --inplace --arrays append tests/manifests/test-cluster-on-pvc-encrypted.yaml tests/manifests/test-kms-vault-spec.yaml yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].count" 2 @@ -564,9 +607,8 @@ jobs: kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml yq merge --inplace --arrays append tests/manifests/test-object.yaml tests/manifests/test-kms-vault-spec.yaml sed -i 's/ver1/ver2/g' tests/manifests/test-object.yaml - sed -i 's/VAULT_BACKEND: v1/VAULT_BACKEND: v2/g' tests/manifests/test-object.yaml kubectl create -f tests/manifests/test-object.yaml - kubectl create -f cluster/examples/kubernetes/ceph/toolbox.yaml + tests/scripts/github-action-helper.sh 
deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml - name: wait for prepare pod run: tests/scripts/github-action-helper.sh wait_for_prepare_pod @@ -587,6 +629,11 @@ jobs: run: | tests/scripts/deploy-validate-vault.sh validate_rgw + - name: collect common logs + if: always() + run: | + tests/scripts/collect-logs.sh + - name: Upload encryption-pvc-kms-vault-token-auth test result uses: actions/upload-artifact@v2 if: always() @@ -605,6 +652,8 @@ jobs: steps: - name: checkout uses: actions/checkout@v2 + with: + fetch-depth: 0 - name: setup golang uses: actions/setup-go@v2 @@ -636,10 +685,10 @@ jobs: - name: deploy cluster run: | - kubectl create -f cluster/examples/kubernetes/ceph/operator.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].encrypted" false kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml - kubectl create -f cluster/examples/kubernetes/ceph/toolbox.yaml + tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml - name: wait for prepare pod run: tests/scripts/github-action-helper.sh wait_for_prepare_pod @@ -650,6 +699,11 @@ jobs: - name: check-ownerreferences run: tests/scripts/github-action-helper.sh check_ownerreferences + - name: collect common logs + if: always() + run: | + tests/scripts/collect-logs.sh + - name: Upload pvc test result uses: actions/upload-artifact@v2 if: always() @@ -668,6 +722,8 @@ jobs: steps: - name: checkout uses: actions/checkout@v2 + with: + fetch-depth: 0 - name: setup golang uses: actions/setup-go@v2 @@ -708,7 +764,6 @@ jobs: yq w -i -d1 cluster-test.yaml spec.dashboard.enabled false yq w -i -d1 cluster-test.yaml spec.storage.useAllDevices false yq w -i -d1 cluster-test.yaml spec.storage.deviceFilter ${BLOCK}1 - yq w -i -d1 cluster-test.yaml spec.cephVersion.image ceph/daemon-base:latest-pacific-devel kubectl create -f cluster-test.yaml -f rbdmirror.yaml -f filesystem-mirror.yaml -f toolbox.yaml # cephfs-mirroring is a push operation @@ -736,41 +791,95 @@ jobs: yq w -i pool-test.yaml spec.mirroring.enabled true yq w -i pool-test.yaml spec.mirroring.mode image kubectl create -f pool-test.yaml - timeout 60 sh -c 'until [ "$(kubectl -n rook-ceph get cephblockpool replicapool -o jsonpath='{.status.phase}'|grep -c "Ready")" -eq 1 ]; do echo "waiting for pool to created" && sleep 1; done' + timeout 60 sh -c 'until [ "$(kubectl -n rook-ceph get cephblockpool replicapool -o jsonpath='{.status.phase}'|grep -c "Ready")" -eq 1 ]; do echo "waiting for pool replicapool to created on cluster 1" && sleep 1; done' + + - name: create replicated mirrored pool 2 on cluster 1 + run: | + cd cluster/examples/kubernetes/ceph/ + yq w -i pool-test.yaml metadata.name replicapool2 + kubectl create -f pool-test.yaml + timeout 60 sh -c 'until [ "$(kubectl -n rook-ceph get cephblockpool replicapool2 -o jsonpath='{.status.phase}'|grep -c "Ready")" -eq 1 ]; do echo "waiting for pool replicapool2 to created on cluster 2" && sleep 1; done' + yq w -i pool-test.yaml metadata.name replicapool - name: create replicated mirrored pool on cluster 2 run: | cd cluster/examples/kubernetes/ceph/ yq w -i pool-test.yaml metadata.namespace rook-ceph-secondary kubectl create -f pool-test.yaml - timeout 60 sh -c 'until [ "$(kubectl -n rook-ceph-secondary get cephblockpool replicapool -o jsonpath='{.status.phase}'|grep 
-c "Ready")" -eq 1 ]; do echo "waiting for pool to created" && sleep 1; done' + timeout 60 sh -c 'until [ "$(kubectl -n rook-ceph-secondary get cephblockpool replicapool -o jsonpath='{.status.phase}'|grep -c "Ready")" -eq 1 ]; do echo "waiting for pool replicapool to created on cluster 1" && sleep 1; done' + + - name: create replicated mirrored pool 2 on cluster 2 + run: | + cd cluster/examples/kubernetes/ceph/ + yq w -i pool-test.yaml metadata.name replicapool2 + kubectl create -f pool-test.yaml + timeout 60 sh -c 'until [ "$(kubectl -n rook-ceph-secondary get cephblockpool replicapool -o jsonpath='{.status.phase}'|grep -c "Ready")" -eq 1 ]; do echo "waiting for pool replicapool2 to created on cluster 2" && sleep 1; done' - - name: create image in the pool + - name: create images in the pools run: | kubectl exec -n rook-ceph deploy/rook-ceph-tools -ti -- rbd -p replicapool create test -s 1G kubectl exec -n rook-ceph deploy/rook-ceph-tools -t -- rbd mirror image enable replicapool/test snapshot kubectl exec -n rook-ceph deploy/rook-ceph-tools -t -- rbd -p replicapool info test + kubectl exec -n rook-ceph deploy/rook-ceph-tools -ti -- rbd -p replicapool2 create test -s 1G + kubectl exec -n rook-ceph deploy/rook-ceph-tools -t -- rbd mirror image enable replicapool2/test snapshot + kubectl exec -n rook-ceph deploy/rook-ceph-tools -t -- rbd -p replicapool2 info test + + - name: copy block mirror peer secret into the other cluster for replicapool + run: | + kubectl -n rook-ceph get secret pool-peer-token-replicapool -o yaml > pool-peer-token-replicapool.yaml + yq delete --inplace pool-peer-token-replicapool.yaml metadata.ownerReferences + yq write --inplace pool-peer-token-replicapool.yaml metadata.namespace rook-ceph-secondary + yq write --inplace pool-peer-token-replicapool.yaml metadata.name pool-peer-token-replicapool-config + kubectl create --namespace=rook-ceph-secondary -f pool-peer-token-replicapool.yaml + + - name: copy block mirror peer secret into the other cluster for replicapool2 (using cluster global peer) + run: | + kubectl -n rook-ceph get secret cluster-peer-token-my-cluster -o yaml > cluster-peer-token-my-cluster.yaml + yq delete --inplace cluster-peer-token-my-cluster.yaml metadata.ownerReferences + yq write --inplace cluster-peer-token-my-cluster.yaml metadata.namespace rook-ceph-secondary + yq write --inplace cluster-peer-token-my-cluster.yaml metadata.name cluster-peer-token-my-cluster-config + kubectl create --namespace=rook-ceph-secondary -f cluster-peer-token-my-cluster.yaml + + - name: add block mirror peer secret to the other cluster for replicapool + run: | + kubectl -n rook-ceph-secondary patch cephblockpool replicapool --type merge -p '{"spec":{"mirroring":{"peers": {"secretNames": ["pool-peer-token-replicapool-config"]}}}}' - - name: copy block mirror peer secret into the other cluster + - name: add block mirror peer secret to the other cluster for replicapool2 (using cluster global peer) run: | - kubectl -n rook-ceph get secret pool-peer-token-replicapool -o yaml |\ - sed 's/namespace: rook-ceph/namespace: rook-ceph-secondary/g; s/name: pool-peer-token-replicapool/name: pool-peer-token-replicapool-config/g' |\ - kubectl create --namespace=rook-ceph-secondary -f - + kubectl -n rook-ceph-secondary patch cephblockpool replicapool2 --type merge -p '{"spec":{"mirroring":{"peers": {"secretNames": ["cluster-peer-token-my-cluster-config"]}}}}' - - name: add block mirror peer secret to the other cluster + - name: verify image has been mirrored for replicapool run: | - 
kubectl -n rook-ceph-secondary patch cephrbdmirror my-rbd-mirror --type merge -p '{"spec":{"peers": {"secretNames": ["pool-peer-token-replicapool-config"]}}}' + # let's wait a bit for the image to be present + timeout 120 sh -c 'until [ "$(kubectl exec -n rook-ceph-secondary deploy/rook-ceph-tools -t -- rbd -p replicapool ls|grep -c test)" -eq 1 ]; do echo "waiting for image to be mirrored in pool replicapool" && sleep 1; done' - - name: verify image has been mirrored + - name: verify image has been mirrored for replicapool2 run: | # let's wait a bit for the image to be present - timeout 120 sh -c 'until [ "$(kubectl exec -n rook-ceph-secondary deploy/rook-ceph-tools -t -- rbd -p replicapool ls|grep -c test)" -eq 1 ]; do echo "waiting for image to be mirrored" && sleep 1; done' + timeout 120 sh -c 'until [ "$(kubectl exec -n rook-ceph-secondary deploy/rook-ceph-tools -t -- rbd -p replicapool2 ls|grep -c test)" -eq 1 ]; do echo "waiting for image to be mirrored in pool replicapool2" && sleep 1; done' - name: display cephblockpool and image status run: | - timeout 80 sh -c 'until [ "$(kubectl -n rook-ceph-secondary get cephblockpool replicapool -o jsonpath='{.status.mirroringStatus.summary.daemon_health}'|grep -c OK)" -eq 1 ]; do echo "waiting for mirroring status to be updated" && sleep 1; done' - kubectl -n rook-ceph-secondary get cephblockpool -o yaml + timeout 80 sh -c 'until [ "$(kubectl -n rook-ceph-secondary get cephblockpool replicapool -o jsonpath='{.status.mirroringStatus.summary.daemon_health}'|grep -c OK)" -eq 1 ]; do echo "waiting for mirroring status to be updated in replicapool" && sleep 1; done' + timeout 80 sh -c 'until [ "$(kubectl -n rook-ceph-secondary get cephblockpool replicapool2 -o jsonpath='{.status.mirroringStatus.summary.daemon_health}'|grep -c OK)" -eq 1 ]; do echo "waiting for mirroring status to be updated in replicapool2" && sleep 1; done' + kubectl -n rook-ceph-secondary get cephblockpool replicapool -o yaml + kubectl -n rook-ceph-secondary get cephblockpool replicapool2 -o yaml kubectl exec -n rook-ceph deploy/rook-ceph-tools -t -- rbd -p replicapool info test + kubectl exec -n rook-ceph deploy/rook-ceph-tools -t -- rbd -p replicapool2 info test + + - name: copy block mirror peer secret into the primary cluster for replicapool + run: | + kubectl -n rook-ceph-secondary get secret pool-peer-token-replicapool -o yaml |\ + sed 's/namespace: rook-ceph-secondary/namespace: rook-ceph/g; s/name: pool-peer-token-replicapool/name: pool-peer-token-replicapool-config/g' |\ + kubectl create --namespace=rook-ceph -f - + + - name: add block mirror peer secret to the primary cluster for replicapool + run: | + kubectl -n rook-ceph patch cephblockpool replicapool --type merge -p '{"spec":{"mirroring":{"peers": {"secretNames": ["pool-peer-token-replicapool-config"]}}}}' + + - name: wait for rook-ceph-csi-mapping-config to be updated with cluster ID + run: | + timeout 60 sh -c 'until [ "$(kubectl get cm -n rook-ceph rook-ceph-csi-mapping-config -o jsonpath='{.data.csi-mapping-config-json}' | grep -c "rook-ceph-secondary")" -eq 1 ]; do echo "waiting for rook-ceph-csi-mapping-config to be created with cluster ID mappings" && sleep 1; done' - name: create replicated mirrored filesystem on cluster 1 run: | @@ -801,7 +910,7 @@ jobs: - name: verify fs mirroring is working run: | - timeout 45 sh -c 'until [ "$(kubectl -n rook-ceph exec -t deploy/rook-ceph-fs-mirror -- ls -1 /var/run/ceph/|grep -c asok)" -eq 3 ]; do echo "waiting for connection to peer" && sleep 1; done' + timeout 45 sh 
-c 'until [ "$(kubectl -n rook-ceph exec -t deploy/rook-ceph-fs-mirror -- ls -1 /var/run/ceph/|grep -c asok)" -lt 3 ]; do echo "waiting for connection to peer" && sleep 1; done' sockets=$(kubectl -n rook-ceph exec -t deploy/rook-ceph-fs-mirror -- ls -1 /var/run/ceph/) status=$(for socket in $sockets; do minikube kubectl -- -n rook-ceph exec -t deploy/rook-ceph-fs-mirror -- ceph --admin-daemon /var/run/ceph/$socket help|awk -F ":" '/get filesystem mirror status/ {print $1}'; done) if [ "${#status}" -lt 1 ]; then echo "peer addition failed" && exit 1; fi @@ -812,6 +921,11 @@ jobs: # the check is not super ideal since 'mirroring_failed' is only displayed when there is a failure but not when it's working... timeout 60 sh -c 'while [ "$(kubectl exec -n rook-ceph deploy/rook-ceph-tools -t -- ceph fs snapshot mirror daemon status myfs|jq -r '.[0].filesystems[0]'|grep -c "mirroring_failed")" -eq 1 ]; do echo "waiting for filesystem to be mirrored" && sleep 1; done' + - name: collect common logs + if: always() + run: | + tests/scripts/collect-logs.sh + - name: upload test result uses: actions/upload-artifact@v2 if: always() diff --git a/.github/workflows/codegen.yml b/.github/workflows/codegen.yml index 3cd1ea884ad0..81ba0ec664a5 100644 --- a/.github/workflows/codegen.yml +++ b/.github/workflows/codegen.yml @@ -26,6 +26,8 @@ jobs: - name: checkout uses: actions/checkout@v2 + with: + fetch-depth: 0 - name: copy working directory to GOPATH run: sudo mkdir -p /home/runner/go/src/github.com && sudo cp -a /home/runner/work/rook /home/runner/go/src/github.com/ diff --git a/.github/workflows/codespell.yaml b/.github/workflows/codespell.yaml index 13d9c315aca7..509ea592a0e3 100644 --- a/.github/workflows/codespell.yaml +++ b/.github/workflows/codespell.yaml @@ -17,6 +17,8 @@ jobs: runs-on: ubuntu-18.04 steps: - uses: actions/checkout@v2 + with: + fetch-depth: 0 - name: codespell uses: codespell-project/actions-codespell@master with: diff --git a/.github/workflows/commitlint.yml b/.github/workflows/commitlint.yml index 23921b5d58e0..5624d95aa520 100644 --- a/.github/workflows/commitlint.yml +++ b/.github/workflows/commitlint.yml @@ -23,4 +23,4 @@ jobs: - uses: wagoid/commitlint-github-action@v2.0.3 with: configFile: './.commitlintrc.json' - helpURL: https://rook.io/docs/rook/master/development-flow.html#commit-structure + helpURL: https://rook.io/docs/rook/latest/development-flow.html#commit-structure diff --git a/.github/workflows/crds-gen.yml b/.github/workflows/crds-gen.yml index 199767cb753e..ad3bffe4cf22 100644 --- a/.github/workflows/crds-gen.yml +++ b/.github/workflows/crds-gen.yml @@ -26,6 +26,8 @@ jobs: - name: checkout uses: actions/checkout@v2 + with: + fetch-depth: 0 - name: copy working directory to GOPATH run: sudo mkdir -p /home/runner/go/src/github.com && sudo cp -a /home/runner/work/rook /home/runner/go/src/github.com/ diff --git a/.github/workflows/create-tag.yaml b/.github/workflows/create-tag.yaml new file mode 100644 index 000000000000..d7815a904088 --- /dev/null +++ b/.github/workflows/create-tag.yaml @@ -0,0 +1,42 @@ +name: Tag +on: + workflow_dispatch: + inputs: + version: + description: 'Release version (e.g. 
v1.7.0)' + required: true + +defaults: + run: + # reference: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#using-a-specific-shell + shell: bash --noprofile --norc -eo pipefail -x {0} + +jobs: + Create-Tag: + runs-on: ubuntu-18.04 + if: github.repository == 'rook/rook' && contains('travisn,leseb,BlaineEXE,jbw976,galexrt,satoru-takeuchi', github.actor) + steps: + - name: checkout + uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: set env + run: | + echo "FROM_BRANCH=${GITHUB_REF##*/}" >> $GITHUB_ENV + echo "TO_TAG=$(git describe --abbrev=0 --tags)" >> $GITHUB_ENV + echo "GITHUB_USER=rook" >> $GITHUB_ENV + + - name: Create Tag + uses: negz/create-tag@v1 + with: + version: ${{ github.event.inputs.version }} + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Get Release Note + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GITHUB_USER: ${{ env.GITHUB_USER }} + FROM_BRANCH: ${{ env.FROM_BRANCH }} + TO_TAG: ${{ env.TO_TAG }} + run: tests/scripts/gen_release_notes.sh diff --git a/.github/workflows/golangci-lint.yaml b/.github/workflows/golangci-lint.yaml index 2eeab3376f12..19a423f56837 100644 --- a/.github/workflows/golangci-lint.yaml +++ b/.github/workflows/golangci-lint.yaml @@ -20,6 +20,8 @@ jobs: with: go-version: 1.16 - uses: actions/checkout@v2 + with: + fetch-depth: 0 - name: golangci-lint uses: golangci/golangci-lint-action@v2 with: diff --git a/.github/workflows/integration-test-cassandra-suite.yaml b/.github/workflows/integration-test-cassandra-suite.yaml deleted file mode 100644 index e76e5f23cceb..000000000000 --- a/.github/workflows/integration-test-cassandra-suite.yaml +++ /dev/null @@ -1,62 +0,0 @@ -name: Integration test CassandraSuite -on: - pull_request: - branches: - - master - - release-* - -defaults: - run: - # reference: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#using-a-specific-shell - shell: bash --noprofile --norc -eo pipefail -x {0} - -jobs: - TestCassandraSuite: - if: ${{ github.event_name == 'pull_request' && github.ref != 'refs/heads/master' && contains(github.event.pull_request.labels.*.name, 'cassandra')}} - runs-on: ubuntu-18.04 - strategy: - fail-fast: false - matrix: - kubernetes-versions : ['v1.16.15', 'v1.21.0'] - steps: - - name: checkout - uses: actions/checkout@v2 - - - name: setup golang - uses: actions/setup-go@v2 - with: - go-version: 1.16 - - - name: setup minikube - uses: manusa/actions-setup-minikube@v2.4.2 - with: - minikube version: 'v1.21.0' - kubernetes version: ${{ matrix.kubernetes-versions }} - start args: --memory 6g --cpus=2 - github token: ${{ secrets.GITHUB_TOKEN }} - - - name: print k8s cluster status - run: tests/scripts/github-action-helper.sh print_k8s_cluster_status - - - name: build rook - run: | - # set VERSION to a dummy value since Jenkins normally sets it for us. 
Do this to make Helm happy and not fail with "Error: Invalid Semantic Version" - GOPATH=$(go env GOPATH) make clean && make -j$nproc IMAGES='cassandra' VERSION=0 build - docker images - docker tag $(docker images|awk '/build-/ {print $1}') rook/cassandra:master - - - name: TestCassandraSuite - run: | - go test -v -timeout 1800s -run CassandraSuite github.com/rook/rook/tests/integration - - - name: Artifact - uses: actions/upload-artifact@v2 - if: failure() - with: - name: cassandra-suite-artifact - path: /home/runner/work/rook/rook/tests/integration/_output/tests/ - - - name: setup tmate session for debugging - if: failure() - uses: mxschmitt/action-tmate@v3 - timeout-minutes: 120 diff --git a/.github/workflows/integration-test-flex-suite.yaml b/.github/workflows/integration-test-flex-suite.yaml index 13b57be54c35..f4662be8b3ed 100644 --- a/.github/workflows/integration-test-flex-suite.yaml +++ b/.github/workflows/integration-test-flex-suite.yaml @@ -17,6 +17,8 @@ jobs: steps: - name: checkout uses: actions/checkout@v2 + with: + fetch-depth: 0 - name: setup golang uses: actions/setup-go@v2 @@ -42,14 +44,23 @@ jobs: - name: TestCephFlexSuite run: | - export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) - go test -v -timeout 1800s -run CephFlexSuite github.com/rook/rook/tests/integration + tests/scripts/github-action-helper.sh collect_udev_logs_in_background + export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) + go test -v -timeout 1800s -run CephFlexSuite github.com/rook/rook/tests/integration + + - name: collect common logs + if: always() + run: | + export LOG_DIR="/home/runner/work/rook/rook/tests/integration/_output/tests/" + export CLUSTER_NAMESPACE="flex-ns" + export OPERATOR_NAMESPACE="flex-ns-system" + tests/scripts/collect-logs.sh - name: Artifact uses: actions/upload-artifact@v2 if: failure() with: - name: ceph-flex-suite-artifact + name: ceph-flex-suite-artifact-${{ matrix.kubernetes-versions }} path: /home/runner/work/rook/rook/tests/integration/_output/tests/ - name: setup tmate session for debugging diff --git a/.github/workflows/integration-test-helm-suite.yaml b/.github/workflows/integration-test-helm-suite.yaml index beeb2b313692..3c28d56bd4a9 100644 --- a/.github/workflows/integration-test-helm-suite.yaml +++ b/.github/workflows/integration-test-helm-suite.yaml @@ -21,6 +21,8 @@ jobs: steps: - name: checkout uses: actions/checkout@v2 + with: + fetch-depth: 0 - name: setup golang uses: actions/setup-go@v2 @@ -49,16 +51,25 @@ jobs: - name: TestCephHelmSuite run: | - tests/scripts/minikube.sh helm - tests/scripts/helm.sh up - export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) - SKIP_TEST_CLEANUP=false SKIP_CLEANUP_POLICY=false go test -v -timeout 1800s -run CephHelmSuite github.com/rook/rook/tests/integration + tests/scripts/github-action-helper.sh collect_udev_logs_in_background + tests/scripts/minikube.sh helm + tests/scripts/helm.sh up + export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) + SKIP_TEST_CLEANUP=false SKIP_CLEANUP_POLICY=false go test -v -timeout 1800s -run CephHelmSuite github.com/rook/rook/tests/integration + + - name: collect common logs + if: always() + run: | + export LOG_DIR="/home/runner/work/rook/rook/tests/integration/_output/tests/" + export CLUSTER_NAMESPACE="helm-ns" + export OPERATOR_NAMESPACE="helm-ns-system" + tests/scripts/collect-logs.sh - name: Artifact uses: actions/upload-artifact@v2 if: failure() with: - name: ceph-helm-suite-artifact + name: ceph-helm-suite-artifact-${{ matrix.kubernetes-versions }} 
path: /home/runner/work/rook/rook/tests/integration/_output/tests/ - name: setup tmate session for debugging diff --git a/.github/workflows/integration-test-mgr-suite.yaml b/.github/workflows/integration-test-mgr-suite.yaml index 58ef88d856ca..26790d009a65 100644 --- a/.github/workflows/integration-test-mgr-suite.yaml +++ b/.github/workflows/integration-test-mgr-suite.yaml @@ -18,6 +18,8 @@ jobs: steps: - name: checkout uses: actions/checkout@v2 + with: + fetch-depth: 0 - name: setup golang uses: actions/setup-go@v2 @@ -43,14 +45,23 @@ jobs: - name: TestCephMgrSuite run: | - export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) - go test -v -timeout 1800s -run CephMgrSuite github.com/rook/rook/tests/integration + tests/scripts/github-action-helper.sh collect_udev_logs_in_background + export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) + go test -v -timeout 1800s -run CephMgrSuite github.com/rook/rook/tests/integration + + - name: collect common logs + if: always() + run: | + export LOG_DIR="/home/runner/work/rook/rook/tests/integration/_output/tests/" + export CLUSTER_NAMESPACE="mgr-ns" + export OPERATOR_NAMESPACE="mgr-ns-system" + tests/scripts/collect-logs.sh - name: Artifact uses: actions/upload-artifact@v2 if: failure() with: - name: ceph-mgr-suite-artifact + name: ceph-mgr-suite-artifact-${{ matrix.kubernetes-versions }} path: /home/runner/work/rook/rook/tests/integration/_output/tests/ - name: setup tmate session for debugging diff --git a/.github/workflows/integration-test-multi-cluster-suite.yaml b/.github/workflows/integration-test-multi-cluster-suite.yaml index b51c0fb8a482..35afee29b29d 100644 --- a/.github/workflows/integration-test-multi-cluster-suite.yaml +++ b/.github/workflows/integration-test-multi-cluster-suite.yaml @@ -21,6 +21,8 @@ jobs: steps: - name: checkout uses: actions/checkout@v2 + with: + fetch-depth: 0 - name: setup golang uses: actions/setup-go@v2 @@ -46,15 +48,24 @@ jobs: - name: TestCephMultiClusterDeploySuite run: | - export TEST_SCRATCH_DEVICE=$(sudo lsblk --paths|awk '/14G/ {print $1}'| head -1)1 - export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) - go test -v -timeout 1800s -run CephMultiClusterDeploySuite github.com/rook/rook/tests/integration + tests/scripts/github-action-helper.sh collect_udev_logs_in_background + export TEST_SCRATCH_DEVICE=$(sudo lsblk --paths|awk '/14G/ {print $1}'| head -1)1 + export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) + go test -v -timeout 1800s -run CephMultiClusterDeploySuite github.com/rook/rook/tests/integration + + - name: collect common logs + if: always() + run: | + export LOG_DIR="/home/runner/work/rook/rook/tests/integration/_output/tests/" + export OPERATOR_NAMESPACE="multi-core-system" + CLUSTER_NAMESPACE="multi-core" tests/scripts/collect-logs.sh + CLUSTER_NAMESPACE="multi-external" tests/scripts/collect-logs.sh - name: Artifact uses: actions/upload-artifact@v2 if: failure() with: - name: ceph-multi-cluster-deploy-suite-artifact + name: ceph-multi-cluster-deploy-suite-artifact-${{ matrix.kubernetes-versions }} path: /home/runner/work/rook/rook/tests/integration/_output/tests/ - name: setup tmate session for debugging diff --git a/.github/workflows/integration-test-nfs-suite.yaml b/.github/workflows/integration-test-nfs-suite.yaml deleted file mode 100644 index 70ef77ecf60c..000000000000 --- a/.github/workflows/integration-test-nfs-suite.yaml +++ /dev/null @@ -1,66 +0,0 @@ -name: Integration test NFSSuite -on: - pull_request: - branches: - - master - - release-* - 
-defaults: - run: - # reference: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#using-a-specific-shell - shell: bash --noprofile --norc -eo pipefail -x {0} - -jobs: - TestNfsSuite: - if: ${{ github.event_name == 'pull_request' && github.ref != 'refs/heads/master' && contains(github.event.pull_request.labels.*.name, 'nfs')}} - runs-on: ubuntu-18.04 - strategy: - fail-fast: false - matrix: - kubernetes-versions : ['v1.16.15', 'v1.21.0'] - steps: - - name: checkout - uses: actions/checkout@v2 - - - name: setup golang - uses: actions/setup-go@v2 - with: - go-version: 1.16 - - - name: setup minikube - uses: manusa/actions-setup-minikube@v2.4.2 - with: - minikube version: 'v1.21.0' - kubernetes version: ${{ matrix.kubernetes-versions }} - start args: --memory 6g --cpus=2 - github token: ${{ secrets.GITHUB_TOKEN }} - - - name: print k8s cluster status - run: tests/scripts/github-action-helper.sh print_k8s_cluster_status - - - name: build rook - run: | - # set VERSION to a dummy value since Jenkins normally sets it for us. Do this to make Helm happy and not fail with "Error: Invalid Semantic Version" - GOPATH=$(go env GOPATH) make clean && make -j$nproc IMAGES='nfs' VERSION=0 build - docker images - docker tag $(docker images|awk '/build-/ {print $1}') rook/nfs:master - - - name: install nfs-common - run: | - sudo apt-get update - sudo apt-get install nfs-common - - - name: TestNFSSuite - run: go test -v -timeout 1800s -run NfsSuite github.com/rook/rook/tests/integration - - - name: Artifact - uses: actions/upload-artifact@v2 - if: failure() - with: - name: nfs-suite-artifact - path: /home/runner/work/rook/rook/tests/integration/_output/tests/ - - - name: setup tmate session for debugging - if: failure() - uses: mxschmitt/action-tmate@v3 - timeout-minutes: 120 diff --git a/.github/workflows/integration-test-smoke-suite.yaml b/.github/workflows/integration-test-smoke-suite.yaml index 2c33f38dd7f1..486f0d41b3b6 100644 --- a/.github/workflows/integration-test-smoke-suite.yaml +++ b/.github/workflows/integration-test-smoke-suite.yaml @@ -21,6 +21,8 @@ jobs: steps: - name: checkout uses: actions/checkout@v2 + with: + fetch-depth: 0 - name: setup golang uses: actions/setup-go@v2 @@ -46,14 +48,23 @@ jobs: - name: TestCephSmokeSuite run: | - export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) - SKIP_CLEANUP_POLICY=false go test -v -timeout 1800s -run CephSmokeSuite github.com/rook/rook/tests/integration + tests/scripts/github-action-helper.sh collect_udev_logs_in_background + export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) + SKIP_CLEANUP_POLICY=false go test -v -timeout 1800s -run CephSmokeSuite github.com/rook/rook/tests/integration + + - name: collect common logs + if: always() + run: | + export LOG_DIR="/home/runner/work/rook/rook/tests/integration/_output/tests/" + export CLUSTER_NAMESPACE="smoke-ns" + export OPERATOR_NAMESPACE="smoke-ns-system" + tests/scripts/collect-logs.sh - name: Artifact uses: actions/upload-artifact@v2 if: failure() with: - name: ceph-smoke-suite-artifact + name: ceph-smoke-suite-artifact-${{ matrix.kubernetes-versions }} path: /home/runner/work/rook/rook/tests/integration/_output/tests/ - name: setup tmate session for debugging diff --git a/.github/workflows/integration-test-upgrade-suite.yaml b/.github/workflows/integration-test-upgrade-suite.yaml index e3565718442d..5b5240297af1 100644 --- a/.github/workflows/integration-test-upgrade-suite.yaml +++ b/.github/workflows/integration-test-upgrade-suite.yaml @@ -21,6 +21,8 
@@ jobs: steps: - name: checkout uses: actions/checkout@v2 + with: + fetch-depth: 0 - name: setup golang uses: actions/setup-go@v2 @@ -46,14 +48,23 @@ jobs: - name: TestCephUpgradeSuite run: | - export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) - go test -v -timeout 1800s -run CephUpgradeSuite github.com/rook/rook/tests/integration + tests/scripts/github-action-helper.sh collect_udev_logs_in_background + export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) + go test -v -timeout 1800s -run CephUpgradeSuite/TestUpgradeRookToMaster github.com/rook/rook/tests/integration + + - name: collect common logs + if: always() + run: | + export LOG_DIR="/home/runner/work/rook/rook/tests/integration/_output/tests/" + export CLUSTER_NAMESPACE="upgrade-ns" + export OPERATOR_NAMESPACE="upgrade-ns-system" + tests/scripts/collect-logs.sh - name: Artifact uses: actions/upload-artifact@v2 if: failure() with: - name: ceph-upgrade-suite-artifact + name: ceph-upgrade-suite-artifact-${{ matrix.kubernetes-versions }} path: /home/runner/work/rook/rook/tests/integration/_output/tests/ - name: setup tmate session for debugging diff --git a/.github/workflows/integration-tests-on-release.yaml b/.github/workflows/integration-tests-on-release.yaml index 7b7ff04f29e5..b8c3be209f3b 100644 --- a/.github/workflows/integration-tests-on-release.yaml +++ b/.github/workflows/integration-tests-on-release.yaml @@ -14,15 +14,16 @@ defaults: jobs: TestCephFlexSuite: - if: github.ref == 'refs/heads/master' || github.ref == 'refs/tags/release-*' runs-on: ubuntu-18.04 strategy: fail-fast: false matrix: - kubernetes-versions : ['v1.15.12','v1.18.15','v1.20.5','v1.21.0'] + kubernetes-versions : ['v1.15.12','v1.18.15','v1.21.0'] steps: - name: checkout uses: actions/checkout@v2 + with: + fetch-depth: 0 - name: setup golang uses: actions/setup-go@v2 @@ -48,14 +49,23 @@ jobs: - name: TestCephFlexSuite run: | - export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) - go test -v -timeout 1800s -run CephFlexSuite github.com/rook/rook/tests/integration + tests/scripts/github-action-helper.sh collect_udev_logs_in_background + export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) + go test -v -timeout 1800s -run CephFlexSuite github.com/rook/rook/tests/integration + + - name: collect common logs + if: always() + run: | + export LOG_DIR="/home/runner/work/rook/rook/tests/integration/_output/tests/" + export CLUSTER_NAMESPACE="flex-ns" + export OPERATOR_NAMESPACE="flex-ns-system" + tests/scripts/collect-logs.sh - name: Artifact uses: actions/upload-artifact@v2 if: failure() with: - name: ceph-flex-suite-artifact + name: ceph-flex-suite-artifact-${{ matrix.kubernetes-versions }} path: /home/runner/work/rook/rook/tests/integration/_output/tests/ - name: setup tmate session for debugging @@ -64,7 +74,6 @@ jobs: timeout-minutes: 120 TestCephHelmSuite: - if: github.ref == 'refs/heads/master' || github.ref == 'refs/tags/release-*' runs-on: ubuntu-18.04 strategy: fail-fast: false @@ -73,6 +82,8 @@ jobs: steps: - name: checkout uses: actions/checkout@v2 + with: + fetch-depth: 0 - name: setup golang uses: actions/setup-go@v2 @@ -101,16 +112,25 @@ jobs: - name: TestCephHelmSuite run: | + tests/scripts/github-action-helper.sh collect_udev_logs_in_background tests/scripts/minikube.sh helm tests/scripts/helm.sh up export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) SKIP_TEST_CLEANUP=false SKIP_CLEANUP_POLICY=false go test -v -timeout 1800s -run CephHelmSuite github.com/rook/rook/tests/integration + - name: collect 
common logs + if: always() + run: | + export LOG_DIR="/home/runner/work/rook/rook/tests/integration/_output/tests/" + export CLUSTER_NAMESPACE="helm-ns" + export OPERATOR_NAMESPACE="helm-ns-system" + tests/scripts/collect-logs.sh + - name: Artifact uses: actions/upload-artifact@v2 if: failure() with: - name: ceph-helm-suite-artifact + name: ceph-helm-suite-artifact-${{ matrix.kubernetes-versions }} path: /home/runner/work/rook/rook/tests/integration/_output/tests/ - name: setup tmate session for debugging @@ -119,7 +139,6 @@ jobs: timeout-minutes: 120 TestCephMultiClusterDeploySuite: - if: github.ref == 'refs/heads/master' || github.ref == 'refs/tags/release-*' runs-on: ubuntu-18.04 strategy: fail-fast: false @@ -128,6 +147,8 @@ jobs: steps: - name: checkout uses: actions/checkout@v2 + with: + fetch-depth: 0 - name: setup golang uses: actions/setup-go@v2 @@ -153,15 +174,24 @@ jobs: - name: TestCephMultiClusterDeploySuite run: | + tests/scripts/github-action-helper.sh collect_udev_logs_in_background export TEST_SCRATCH_DEVICE=$(sudo lsblk --paths|awk '/14G/ {print $1}'| head -1)1 export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) go test -v -timeout 1800s -run CephMultiClusterDeploySuite github.com/rook/rook/tests/integration + - name: collect common logs + if: always() + run: | + export LOG_DIR="/home/runner/work/rook/rook/tests/integration/_output/tests/" + export OPERATOR_NAMESPACE="multi-core-system" + CLUSTER_NAMESPACE="multi-core" tests/scripts/collect-logs.sh + CLUSTER_NAMESPACE="multi-external" tests/scripts/collect-logs.sh + - name: Artifact uses: actions/upload-artifact@v2 if: failure() with: - name: ceph-multi-cluster-deploy-suite-artifact + name: ceph-multi-cluster-deploy-suite-artifact-${{ matrix.kubernetes-versions }} path: /home/runner/work/rook/rook/tests/integration/_output/tests/ - name: setup tmate session for debugging @@ -170,7 +200,6 @@ jobs: timeout-minutes: 120 TestCephSmokeSuite: - if: github.ref == 'refs/heads/master' || github.ref == 'refs/tags/release-*' runs-on: ubuntu-18.04 strategy: fail-fast: false @@ -179,6 +208,8 @@ jobs: steps: - name: checkout uses: actions/checkout@v2 + with: + fetch-depth: 0 - name: setup golang uses: actions/setup-go@v2 @@ -204,14 +235,23 @@ jobs: - name: TestCephSmokeSuite run: | - export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) - SKIP_CLEANUP_POLICY=false go test -v -timeout 1800s -run CephSmokeSuite github.com/rook/rook/tests/integration + tests/scripts/github-action-helper.sh collect_udev_logs_in_background + export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) + SKIP_CLEANUP_POLICY=false go test -v -timeout 1800s -run CephSmokeSuite github.com/rook/rook/tests/integration + + - name: collect common logs + if: always() + run: | + export LOG_DIR="/home/runner/work/rook/rook/tests/integration/_output/tests/" + export CLUSTER_NAMESPACE="smoke-ns" + export OPERATOR_NAMESPACE="smoke-ns-system" + tests/scripts/collect-logs.sh - name: Artifact uses: actions/upload-artifact@v2 if: failure() with: - name: ceph-smoke-suite-artifact + name: ceph-smoke-suite-artifact-${{ matrix.kubernetes-versions }} path: /home/runner/work/rook/rook/tests/integration/_output/tests/ - name: setup tmate session for debugging @@ -220,7 +260,6 @@ jobs: timeout-minutes: 120 TestCephUpgradeSuite: - if: github.ref == 'refs/heads/master' || github.ref == 'refs/tags/release-*' runs-on: ubuntu-18.04 strategy: fail-fast: false @@ -229,6 +268,8 @@ jobs: steps: - name: checkout uses: actions/checkout@v2 + with: + fetch-depth: 0 - name: 
setup golang uses: actions/setup-go@v2 @@ -254,119 +295,23 @@ jobs: - name: TestCephUpgradeSuite run: | - export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) - go test -v -timeout 1800s -run CephUpgradeSuite github.com/rook/rook/tests/integration - - - name: Artifact - uses: actions/upload-artifact@v2 - if: failure() - with: - name: ceph-upgrade-suite-artifact - path: /home/runner/work/rook/rook/tests/integration/_output/tests/ - - - name: setup tmate session for debugging - if: failure() - uses: mxschmitt/action-tmate@v3 - timeout-minutes: 120 - - TestCassandraSuite: - if: github.ref == 'refs/heads/master' || github.ref == 'refs/tags/release-*' - runs-on: ubuntu-18.04 - strategy: - fail-fast: false - matrix: - kubernetes-versions : ['v1.16.15','v1.18.15','v1.20.5','v1.21.0'] - steps: - - name: checkout - uses: actions/checkout@v2 - - - name: setup golang - uses: actions/setup-go@v2 - with: - go-version: 1.16 - - - name: setup minikube - uses: manusa/actions-setup-minikube@v2.4.2 - with: - minikube version: 'v1.21.0' - kubernetes version: ${{ matrix.kubernetes-versions }} - start args: --memory 6g --cpus=2 - github token: ${{ secrets.GITHUB_TOKEN }} - - - name: check k8s cluster status - run: tests/scripts/github-action-helper.sh print_k8s_cluster_status - - - name: build rook - run: | - # set VERSION to a dummy value since Jenkins normally sets it for us. Do this to make Helm happy and not fail with "Error: Invalid Semantic Version" - GOPATH=$(go env GOPATH) make clean && make -j$nproc IMAGES='cassandra' VERSION=0 build - docker images - docker tag $(docker images|awk '/build-/ {print $1}') rook/cassandra:master - - - name: TestCassandraSuite - run: | - export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) - SKIP_CLEANUP_POLICY=false go test -v -timeout 1800s -run CassandraSuite github.com/rook/rook/tests/integration - - - name: Artifact - uses: actions/upload-artifact@v2 - if: failure() - with: - name: cassandra-suite-artifact - path: /home/runner/work/rook/rook/tests/integration/_output/tests/ - - - name: setup tmate session for debugging - if: failure() - uses: mxschmitt/action-tmate@v3 - timeout-minutes: 120 - - TestNFSSuite: - if: github.ref == 'refs/heads/master' || github.ref == 'refs/tags/release-*' - runs-on: ubuntu-18.04 - strategy: - fail-fast: false - matrix: - kubernetes-versions : ['v1.16.15','v1.18.15','v1.20.5','v1.21.0'] - steps: - - name: checkout - uses: actions/checkout@v2 - - - name: setup golang - uses: actions/setup-go@v2 - with: - go-version: 1.16 - - - name: setup minikube - uses: manusa/actions-setup-minikube@v2.4.2 - with: - minikube version: 'v1.21.0' - kubernetes version: ${{ matrix.kubernetes-versions }} - start args: --memory 6g --cpus=2 - github token: ${{ secrets.GITHUB_TOKEN }} - - - name: check k8s cluster status - run: tests/scripts/github-action-helper.sh print_k8s_cluster_status - - - name: build rook - run: | - # set VERSION to a dummy value since Jenkins normally sets it for us. 
Do this to make Helm happy and not fail with "Error: Invalid Semantic Version" - GOPATH=$(go env GOPATH) make clean && make -j$nproc IMAGES='nfs' VERSION=0 build - docker images - docker tag $(docker images|awk '/build-/ {print $1}') rook/nfs:master + tests/scripts/github-action-helper.sh collect_udev_logs_in_background + export DEVICE_FILTER=$(lsblk|awk '/14G/ {print $1}'| head -1) + go test -v -timeout 1800s -run CephUpgradeSuite/TestUpgradeRookToMaster github.com/rook/rook/tests/integration - - name: install nfs-common + - name: collect common logs + if: always() run: | - sudo apt-get update - sudo apt-get install nfs-common - - - name: TestNFSSuite - run: go test -v -timeout 1800s -run NfsSuite github.com/rook/rook/tests/integration + export LOG_DIR="/home/runner/work/rook/rook/tests/integration/_output/tests/" + export CLUSTER_NAMESPACE="upgrade-ns" + export OPERATOR_NAMESPACE="upgrade-ns-system" + tests/scripts/collect-logs.sh - name: Artifact uses: actions/upload-artifact@v2 if: failure() with: - name: nfs-suite-artifact + name: ceph-upgrade-suite-artifact-${{ matrix.kubernetes-versions }} path: /home/runner/work/rook/rook/tests/integration/_output/tests/ - name: setup tmate session for debugging diff --git a/.github/workflows/mod-check.yml b/.github/workflows/mod-check.yml index a31920d23222..212a30a42c2d 100644 --- a/.github/workflows/mod-check.yml +++ b/.github/workflows/mod-check.yml @@ -22,6 +22,8 @@ jobs: steps: - name: checkout uses: actions/checkout@v2 + with: + fetch-depth: 0 - uses: actions/setup-go@v2 with: diff --git a/.github/workflows/push-build.yaml b/.github/workflows/push-build.yaml index f71990d45012..089eaec3707b 100644 --- a/.github/workflows/push-build.yaml +++ b/.github/workflows/push-build.yaml @@ -19,6 +19,8 @@ jobs: steps: - name: checkout uses: actions/checkout@v2 + with: + fetch-depth: 0 # docker/setup-qemu action installs QEMU static binaries, which are used to run builders for architectures other than the host. 
- name: set up QEMU @@ -39,9 +41,6 @@ jobs: aws-secret-access-key: ${{ secrets.AWS_PSW }} aws-region: us-east-1 - - name: unshallow - run: git fetch --prune --unshallow --tags --force - # creating custom env var - name: set env run: | @@ -56,8 +55,4 @@ jobs: AWS_PSW: ${{ secrets.AWS_PSW }} GITHUB_REF: $ {{ env.GITHUB_REF }} run: | - if [[ ${GITHUB_REF} =~ master|v ]]; then - tests/scripts/build-release.sh publish_and_promote - else - tests/scripts/build-release.sh publish - fi + tests/scripts/build-release.sh diff --git a/.github/workflows/unit-test.yml b/.github/workflows/unit-test.yml index a82c6906c64a..59e1ba87ca77 100644 --- a/.github/workflows/unit-test.yml +++ b/.github/workflows/unit-test.yml @@ -22,6 +22,8 @@ jobs: steps: - name: checkout uses: actions/checkout@v2 + with: + fetch-depth: 0 - uses: actions/setup-go@v2 with: diff --git a/.github/workflows/yaml-lint.yaml b/.github/workflows/yaml-lint.yaml index b7757b438330..70a779f19b7a 100644 --- a/.github/workflows/yaml-lint.yaml +++ b/.github/workflows/yaml-lint.yaml @@ -16,6 +16,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 + with: + fetch-depth: 0 - name: Set up Python uses: actions/setup-python@v2 diff --git a/.mergify.yml b/.mergify.yml index 2d8d1fa09c67..6b22c018f61f 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -1,22 +1,4 @@ pull_request_rules: - # auto label PRs based on title content - - name: auto ceph label pr storage backend - conditions: - - title~=^ceph - - base=master - actions: - label: - add: - - ceph - - name: auto cassandra label pr storage backend - conditions: - - title~=^cassandra - - base=master - actions: - label: - add: - - cassandra - # if there is a conflict in a backport PR, ping the author to send a proper backport PR - name: ping author on conflicts conditions: diff --git a/Documentation/README.md b/Documentation/README.md index 12aaf91933a3..e57ddd93a84c 100644 --- a/Documentation/README.md +++ b/Documentation/README.md @@ -6,23 +6,18 @@ Rook turns storage software into self-managing, self-scaling, and self-healing s Rook integrates deeply into cloud native environments leveraging extension points and providing a seamless experience for scheduling, lifecycle management, resource management, security, monitoring, and user experience. -For more details about the status of storage solutions currently supported by Rook, please refer to the [project status section](https://github.com/rook/rook/blob/master/README.md#project-status) of the Rook repository. -We plan to continue adding support for other storage systems and environments based on community demand and engagement in future releases. +The Ceph operator was declared stable in December 2018 in the Rook v0.9 release, providing a production storage platform for several years already. -## Quick Start Guides +## Quick Start Guide -Starting Rook in your cluster is as simple as a few `kubectl` commands depending on the storage provider. -See our [Quickstart](quickstart.md) guide list for the detailed instructions for each storage provider. +Starting Ceph in your cluster is as simple as a few `kubectl` commands. +See our [Quickstart](quickstart.md) guide to get started with the Ceph operator! -## Storage Provider Designs +## Designs -High-level Storage Provider design documents: +[Ceph](https://docs.ceph.com/en/latest/) is a highly scalable distributed storage solution for block storage, object storage, and shared filesystems with years of production deployments. See the [Ceph overview](ceph-storage.md). 
-| Storage Provider | Status | Description |
-| ----------------------- | ------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
-| [Ceph](ceph-storage.md) | Stable | Ceph is a highly scalable distributed storage solution for block storage, object storage, and shared filesystems with years of production deployments. |
-
-Low level design documentation for supported list of storage systems collected at [design docs](https://github.com/rook/rook/tree/master/design) section.
+For detailed design documentation, see also the [design docs](https://github.com/rook/rook/tree/master/design).
 
 ## Need help? Be sure to join the Rook Slack
diff --git a/Documentation/async-disaster-recovery.md b/Documentation/async-disaster-recovery.md
new file mode 100644
index 000000000000..9577976b33c9
--- /dev/null
+++ b/Documentation/async-disaster-recovery.md
@@ -0,0 +1,112 @@
+---
+title: Failover and Failback
+weight: 3245
+indent: true
+---
+
+# RBD Asynchronous DR Failover and Failback
+
+## Table of Contents
+
+* [Planned Migration and Disaster Recovery](#planned-migration-and-disaster-recovery)
+* [Planned Migration](#planned-migration)
+  * [Relocation](#relocation)
+* [Disaster Recovery](#disaster-recovery)
+  * [Failover](#failover-abrupt-shutdown)
+  * [Failback](#failback-post-disaster-recovery)
+
+## Planned Migration and Disaster Recovery
+
+Rook comes with volume replication support, which allows users to perform disaster recovery and planned migration of clusters.
+
+This document describes the procedure for failover and failback in the disaster recovery and planned migration use cases.
+
+> **Note**: The document assumes that RBD mirroring is set up between the peer clusters.
+> For information on RBD mirroring and how to set it up using Rook, please refer to
+> the [rbd-mirroring guide](rbd-mirroring.md).
+
+## Planned Migration
+
+> Use cases: datacenter maintenance, technology refresh, disaster avoidance, etc.
+
+### Relocation
+
+The relocation operation is the process of switching production to a
+ backup facility (normally your recovery site) or vice versa. For relocation,
+ access to the image on the primary site should be stopped.
+The image should then be made *primary* on the secondary cluster so that
+ access can be resumed there.
+
+> :memo: A periodic or one-time backup of
+> the application should be available for restore on the secondary site (cluster-2).
+
+Follow the steps below for planned migration of a workload from the primary
+ cluster to the secondary cluster:
+
+* Scale down all the application pods which are using the
+  mirrored PVC on the primary cluster.
+* [Take a backup](rbd-mirroring.md#backup-&-restore) of the PVC and PV objects from the primary cluster.
+  This can be done using a backup tool such as
+  [velero](https://velero.io/docs/main/).
+* [Update VolumeReplication CR](rbd-mirroring.md#create-a-volumereplication-cr) to set `replicationState` to `secondary` at the primary site.
+  When the operator sees this change, it will pass the information down to the
+  driver via a GRPC request to mark the dataSource as `secondary`.
+* If you are manually recreating the PVC and PV on the secondary cluster,
+  remove the `claimRef` section in the PV objects. (See [this](rbd-mirroring.md#restore-the-backup-on-cluster-2) for details)
+* Recreate the storageclass, PVC, and PV objects on the secondary site.
+* As you are creating the static binding between the PVC and PV, a new PV won’t
+  be created here; the PVC will bind to the existing PV.
+* [Create the VolumeReplicationClass](rbd-mirroring.md#create-a-volume-replication-class-cr) on the secondary site.
+* [Create VolumeReplications](rbd-mirroring.md#create-a-volumereplication-cr) for all the PVCs for which mirroring
+  is enabled.
+  * `replicationState` should be `primary` for all the PVCs on
+    the secondary site.
+* [Check VolumeReplication CR status](rbd-mirroring.md#checking-replication-status) to verify that the image is marked `primary` on the secondary site.
+* Once the image is marked `primary`, the PVC is ready
+  to be used. Now you can scale up the applications to use the PVC.
+
+> :memo: **WARNING**: In the async disaster recovery use case, you do not get
+> the complete data.
+> You will only get crash-consistent data based on the snapshot interval time.
+
+## Disaster Recovery
+
+> Use cases: natural disasters, power failures, system failures, crashes, etc.
+
+> **NOTE:** To effectively resume operations after a failover/relocation,
+> backups of the Kubernetes artifacts such as the deployment, PVC, and PV need to be created beforehand by the admin so that the application can be restored on the peer cluster. For more information, see [backup and restore](rbd-mirroring.md#backup-&-restore).
+
+### Failover (abrupt shutdown)
+
+In case of disaster recovery, create the VolumeReplication CR at the secondary site.
+ Since the connection to the primary site is lost, the operator automatically
+ sends a GRPC request down to the driver to forcefully mark the dataSource as `primary`
+ on the secondary site.
+
+* If you are manually creating the PVC and PV on the secondary cluster, remove
+  the `claimRef` section in the PV objects. (See [this](rbd-mirroring.md#restore-the-backup-on-cluster-2) for details)
+* Create the storageclass, PVC, and PV objects on the secondary site.
+* As you are creating the static binding between the PVC and PV, a new PV won’t be
+  created here; the PVC will bind to the existing PV.
+* [Create the VolumeReplicationClass](rbd-mirroring.md#create-a-volume-replication-class-cr) and [VolumeReplication CR](rbd-mirroring.md#create-a-volumereplication-cr) on the secondary site.
+* [Check VolumeReplication CR status](rbd-mirroring.md#checking-replication-status) to verify that the image is marked `primary` on the secondary site.
+* Once the image is marked `primary`, the PVC is ready to be used. Now
+  you can scale up the applications to use the PVC.
+
+### Failback (post-disaster recovery)
+
+Once the failed cluster is recovered on the primary site and you want to fail back
+ from the secondary site, follow the steps below:
+
+* Scale down the running applications (if any) on the primary site.
+  Ensure that all persistent volumes in use by the workload are no
+  longer in use on the primary cluster.
+* [Update VolumeReplication CR](rbd-mirroring.md#create-a-volumereplication-cr) `replicationState`
+  from `primary` to `secondary` on the primary site.
+* Scale down the applications on the secondary site.
+* [Update VolumeReplication CR](rbd-mirroring.md#create-a-volumereplication-cr) `replicationState` from `primary` to
+  `secondary` on the secondary site.
+* On the primary site, [verify that the VolumeReplication status](rbd-mirroring.md#checking-replication-status) reports the
+  volume as ready to use.
+* Once the volume is marked ready to use, change the `replicationState`
+  from `secondary` to `primary` on the primary site.
+* Scale up the applications again on the primary site.
diff --git a/Documentation/authenticated-registry.md b/Documentation/authenticated-registry.md
new file mode 100644
index 000000000000..f9903e8cc96f
--- /dev/null
+++ b/Documentation/authenticated-registry.md
@@ -0,0 +1,65 @@
+---
+title: Authenticated Registries
+weight: 1100
+indent: true
+---
+
+## Authenticated docker registries
+
+If you want to use an image from an authenticated docker registry (e.g. for image cache/mirror), you'll need to
+add an `imagePullSecret` to all relevant service accounts. This way all pods created by the operator (for service account:
+`rook-ceph-system`) or all new pods in the namespace (for service account: `default`) will have the `imagePullSecret` added
+to their spec.
+
+The whole process is described in the [official kubernetes documentation](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account).
+
+### Example setup for a ceph cluster
+
+To get you started, here's a quick rundown for the ceph example from the [quickstart guide](/Documentation/quickstart.md).
+
+First, we'll create the secret for our registry as described [here](https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod):
+
+```console
+# for namespace rook-ceph
+$ kubectl -n rook-ceph create secret docker-registry my-registry-secret --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL
+
+# and for namespace rook-ceph (cluster)
+$ kubectl -n rook-ceph create secret docker-registry my-registry-secret --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL
+```
+
+Next, we'll add the following snippet to all relevant service accounts as described [here](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account):
+
+```yaml
+imagePullSecrets:
+- name: my-registry-secret
+```
+
+The service accounts are:
+
+* `rook-ceph-system` (namespace: `rook-ceph`): Will affect all pods created by the rook operator in the `rook-ceph` namespace.
+* `default` (namespace: `rook-ceph`): Will affect most pods in the `rook-ceph` namespace.
+* `rook-ceph-mgr` (namespace: `rook-ceph`): Will affect the MGR pods in the `rook-ceph` namespace.
+* `rook-ceph-osd` (namespace: `rook-ceph`): Will affect the OSD pods in the `rook-ceph` namespace.
+
+You can do it either via e.g. `kubectl -n rook-ceph edit serviceaccount default` or by modifying the [`operator.yaml`](https://github.com/rook/rook/blob/master/cluster/examples/kubernetes/ceph/operator.yaml)
+and [`cluster.yaml`](https://github.com/rook/rook/blob/master/cluster/examples/kubernetes/ceph/cluster.yaml) before deploying them.
+
+Since it's the same procedure for all service accounts, here is just one example:
+
+```console
+kubectl -n rook-ceph edit serviceaccount default
+```
+
+```yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: default
+  namespace: rook-ceph
+secrets:
+- name: default-token-12345
+imagePullSecrets: # here are the new
+- name: my-registry-secret # parts
+```
+
+After doing this for all service accounts, all pods should be able to pull images from your registry.
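+
+As a sketch (assuming the secret is named `my-registry-secret` and everything runs in the `rook-ceph` namespace as above), the same snippet could be attached to all of the service accounts listed above with a small loop. Note that `kubectl patch` used this way replaces any `imagePullSecrets` already present on a service account:
+
+```console
+for sa in rook-ceph-system default rook-ceph-mgr rook-ceph-osd; do
+  kubectl -n rook-ceph patch serviceaccount "$sa" \
+    -p '{"imagePullSecrets": [{"name": "my-registry-secret"}]}'
+done
+```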
diff --git a/Documentation/cassandra-cluster-crd.md b/Documentation/cassandra-cluster-crd.md deleted file mode 100644 index 5d6be72cc6e1..000000000000 --- a/Documentation/cassandra-cluster-crd.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -title: Cassandra Cluster CRD -weight: 5000 ---- - -# Cassandra Cluster CRD - -Cassandra database clusters can be created and configuring using the `clusters.cassandra.rook.io` custom resource definition (CRD). - -Please refer to the the [user guide walk-through](cassandra.md) for complete instructions. -This page will explain all the available configuration options on the Cassandra CRD. - -## Sample - -```yaml -apiVersion: cassandra.rook.io/v1alpha1 -kind: Cluster -metadata: - name: rook-cassandra - namespace: rook-cassandra -spec: - version: 3.11.6 - repository: my-private-repo.io/cassandra - mode: cassandra - # A key/value list of annotations - annotations: - # key: value - datacenter: - name: us-east-1 - racks: - - name: us-east-1a - members: 3 - storage: - volumeClaimTemplates: - - metadata: - name: rook-cassandra-data - spec: - storageClassName: my-storage-class - resources: - requests: - storage: 200Gi - resources: - requests: - cpu: 8 - memory: 32Gi - limits: - cpu: 8 - memory: 32Gi - # A key/value list of annotations - annotations: - # key: value - placement: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: failure-domain.beta.kubernetes.io/region - operator: In - values: - - us-east-1 - - key: failure-domain.beta.kubernetes.io/zone - operator: In - values: - - us-east-1a -``` - -## Settings Explanation - -### Cluster Settings - -* `version`: The version of Cassandra to use. It is used as the image tag to pull. -* `repository`: Optional field. Specifies a custom image repo. If left unset, the official docker hub repo is used. -* `mode`: Optional field. Specifies if this is a Cassandra or Scylla cluster. If left unset, it defaults to cassandra. Values: {scylla, cassandra} -* `annotations`: Key value pair list of annotations to add. - -In the Cassandra model, each cluster contains datacenters and each datacenter contains racks. At the moment, the operator only supports single datacenter setups. - -### Datacenter Settings - -* `name`: Name of the datacenter. Usually, a datacenter corresponds to a region. -* `racks`: List of racks for the specific datacenter. - -### Rack Settings - -* `name`: Name of the rack. Usually, a rack corresponds to an availability zone. -* `members`: Number of Cassandra members for the specific rack. (In Cassandra documentation, they are called nodes. We don't call them nodes to avoid confusion as a Cassandra Node corresponds to a Kubernetes Pod, not a Kubernetes Node). -* `storage`: Defines the volumes to use for each Cassandra member. Currently, only 1 volume is supported. -* `jmxExporterConfigMapName`: Name of configmap that will be used for [jmx_exporter](https://github.com/prometheus/jmx_exporter). Exporter listens on port 9180. If the name not specified, the exporter will not be run. -* `resources`: Defines the CPU and RAM resources for the Cassandra Pods. -* `annotations`: Key value pair list of annotations to add. -* `placement`: Defines the placement of Cassandra Pods. 
Has the following subfields: - * [`nodeAffinity`](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) - * [`podAffinity`](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) - * [`podAntiAffinity`](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) - * [`tolerations`](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) diff --git a/Documentation/cassandra-operator-upgrade.md b/Documentation/cassandra-operator-upgrade.md deleted file mode 100644 index bb90a200104b..000000000000 --- a/Documentation/cassandra-operator-upgrade.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -title: Upgrade -weight: 5100 -indent: true ---- - -# Cassandra Operator Upgrades - -This guide will walk you through the manual steps to upgrade the software in Cassandra Operator from one version to the next. The cassandra operator is made up of two parts: - -1. The `Operator` binary that runs as a standalone application, watches the Cassandra Cluster CRD and makes administrative decisions. -1. A sidecar that runs alongside each member of a Cassandra Cluster. We will call this component `Sidecar`. - -Both components should be updated. This is a very manual process at the moment, but it should be automated soon in the future, once the Cassandra Operator reaches the beta stage. - -## Considerations - -With this upgrade guide, there are a few notes to consider: - -* **WARNING**: Upgrading a Rook cluster is not without risk. There may be unexpected issues or - obstacles that damage the integrity and health of your storage cluster, including data loss. Only - proceed with this guide if you are comfortable with that. It is recommended that you backup your data before proceeding. -* **WARNING**: The current process to upgrade REQUIRES the cluster to be unavailable for the time of the upgrade. - -## Prerequisites - -* If you are upgrading from v0.9.2 to a later version, the mount point of the PVC for each member has changed because it was wrong. Please follow the [migration instructions for upgrading from v0.9.2](#before-upgrading-from-v092). -* Before starting the procedure, ensure that your Cassandra Clusters are in a healthy state. You can check a Cassandra Cluster's health by using `kubectl describe clusters.cassandra.rook.io $NAME -n $NAMESPACE` and ensuring that for each rack in the Status, `readyMembers` equals `members`. - -## Procedure - -1. Because each version of the `Operator` is designed to work with the same version of the `Sidecar`, they must be upgraded together. In order to avoid mixing versions between the `Operator` and `Sidecar`, we first delete every Cassandra Cluster CRD in our Kubernetes cluster, after first backing up their manifests. This will not delete your data because the PVCs will be retained even if the Cassandra Cluster object is deleted. Example: - -```console -# Assumes cluster rook-cassandra in namespace rook-cassandra -NAME=rook-cassandra -NAMESPACE=rook-cassandra - -kubectl get clusters.cassandra.rook.io $NAME -n $NAMESPACE -o yaml > $NAME.yaml -kubectl delete clusters.cassandra.rook.io $NAME -n $NAMESPACE -``` - -2. After that, we upgrade the version of the `Operator`. 
To achieve that, we patch the StatefulSet running the `Operator`: - -```console -# Assumes Operator is running in StatefulSet rook-cassandra-operator -# in namespace rook-cassandra-system - -kubectl set image sts/rook-cassandra-operator rook-cassandra-operator=rook/cassandra:v0.9.x -n rook-cassandra-system -``` - -After patching, ensure that the operator pods are running successfully: - -```console -kubectl get pods -n rook-cassandra-system -``` - -3. Recreate the manifests previously deleted: - -```console -kubectl apply -f $NAME.yaml -``` - -The `Operator` will pick up the newly created Cassandra Clusters and recreate them with the correct version of the sidecar. - -## Before Upgrading from v0.9.2 - -Do the following before proceeding: - -* For each member of each cluster: - -```console -POD=rook-cassandra-us-east-1-us-east-1a-0 -NAMESPACE=rook-cassandra - -# Change /var/lib/cassandra to /var/lib/scylla for a scylla cluster -kubectl exec $POD -n $NAMESPACE -- /bin/bash - -mkdir /var/lib/cassandra/data/data -shopt -s extglob -mv !(/var/lib/cassandra/data) /var/lib/cassandra/data/data -``` - -After that continue with [the upgrade procedure](#procedure). diff --git a/Documentation/cassandra.md b/Documentation/cassandra.md deleted file mode 100644 index 91abe31123df..000000000000 --- a/Documentation/cassandra.md +++ /dev/null @@ -1,208 +0,0 @@ ---- -title: Cassandra -weight: 250 -indent: true ---- -{% include_relative branch.liquid %} - -# Cassandra Quickstart - -[Cassandra](http://cassandra.apache.org/) is a highly available, fault tolerant, peer-to-peer NoSQL database featuring lightning fast performance and tunable consistency. It provides massive scalability with no single point of failure. - -[Scylla](https://www.scylladb.com) is a close-to-the-hardware rewrite of Cassandra in C++. It features a shared nothing architecture that enables true linear scaling and major hardware optimizations that achieve ultra-low latencies and extreme throughput. It is a drop-in replacement for Cassandra and uses the same interfaces, so it is also supported by Rook. - -## Prerequisites - -A Kubernetes cluster (v1.16 or higher) is necessary to run the Rook Cassandra operator. -To make sure you have a Kubernetes cluster that is ready for `Rook`, you can [follow these instructions](k8s-pre-reqs.md) (the flexvolume plugin is not necessary for Cassandra) - -## Deploy Cassandra Operator - -First deploy the Rook Cassandra Operator using the following commands: - -```console -$ git clone --single-branch --branch {{ branchName }} https://github.com/rook/rook.git -cd rook/cluster/examples/kubernetes/cassandra -kubectl apply -f crds.yaml -kubectl apply -f operator.yaml -``` - -This will install the operator in namespace rook-cassandra-system. You can check if the operator is up and running with: - -```console -kubectl -n rook-cassandra-system get pod -``` - -## Create and Initialize a Cassandra/Scylla Cluster - -Now that the operator is running, we can create an instance of a Cassandra/Scylla cluster by creating an instance of the `clusters.cassandra.rook.io` resource. -Some of that resource's values are configurable, so feel free to browse `cluster.yaml` and tweak the settings to your liking. -Full details for all the configuration options can be found in the [Cassandra Cluster CRD documentation](cassandra-cluster-crd.md). 
- -When you are ready to create a Cassandra cluster, simply run: - -```console -kubectl create -f cluster.yaml -``` - -We can verify that a Kubernetes object has been created that represents our new Cassandra cluster with the command below. -This is important because it shows that Rook has successfully extended Kubernetes to make Cassandra clusters a first class citizen in the Kubernetes cloud-native environment. - -```console -kubectl -n rook-cassandra get clusters.cassandra.rook.io -``` - -To check if all the desired members are running, you should see the same number of entries from the following command as the number of members that was specified in `cluster.yaml`: - -```console -kubectl -n rook-cassandra get pod -l app=rook-cassandra -``` - -You can also track the state of a Cassandra cluster from its status. To check the current status of a Cluster, run: - -```console -kubectl -n rook-cassandra describe clusters.cassandra.rook.io rook-cassandra -``` - -## Accessing the Database - -* From kubectl: - -To get a `cqlsh` shell in your new Cluster: - -```console -kubectl exec -n rook-cassandra -it rook-cassandra-east-1-east-1a-0 -- cqlsh -> DESCRIBE KEYSPACES; -``` - -* From inside a Pod: - -When you create a new Cluster, Rook automatically creates a Service for the clients to use in order to access the Cluster. The service's name follows the convention `-client`. You can see this Service in you cluster by running: - -```console -kubectl -n rook-cassandra describe service rook-cassandra-client -``` - -Pods running inside the Kubernetes cluster can use this Service to connect to Cassandra. -Here's an example using the [Python Driver](https://github.com/datastax/python-driver): - -```python -from cassandra.cluster import Cluster - -cluster = Cluster(['rook-cassandra-client.rook-cassandra.svc.cluster.local']) -session = cluster.connect() -``` - -## Scale Up - -The operator supports scale up of a rack as well as addition of new racks. To make the changes, you can use: - -```console -kubectl edit clusters.cassandra.rook.io rook-cassandra -``` - -* To scale up a rack, change the `Spec.Members` field of the rack to the desired value. -* To add a new rack, append the `racks` list with a new rack. Remember to choose a different rack name for the new rack. -* After editing and saving the yaml, check your cluster's Status and Events for information on what's happening: - -```console -kubectl -n rook-cassandra describe clusters.cassandra.rook.io rook-cassandra -``` - - -## Scale Down - -The operator supports scale down of a rack. To make the changes, you can use: - -```console -kubectl edit clusters.cassandra.rook.io rook-cassandra -``` - -* To scale down a rack, change the `Spec.Members` field of the rack to the desired value. -* After editing and saving the yaml, check your cluster's Status and Events for information on what's happening: - -```console -kubectl -n rook-cassandra describe clusters.cassandra.rook.io rook-cassandra -``` - -## Clean Up - -To clean up all resources associated with this walk-through, you can run the commands below. - -> **NOTE**: that this will destroy your database and delete all of its associated data. 
- -```console -kubectl delete -f cluster.yaml -kubectl delete -f operator.yaml -kubectl delete -f crds.yaml -``` - -## Troubleshooting - -If the cluster does not come up, the first step would be to examine the operator's logs: - -```console -kubectl -n rook-cassandra-system logs -l app=rook-cassandra-operator -``` - -If everything looks OK in the operator logs, you can also look in the logs for one of the Cassandra instances: - -```console -kubectl -n rook-cassandra logs rook-cassandra-0 -``` - -## Cassandra Monitoring - -To enable jmx_exporter for cassandra rack, you should specify `jmxExporterConfigMapName` option for rack in CassandraCluster CRD. - -For example: -```yaml -apiVersion: cassandra.rook.io/v1alpha1 -kind: Cluster -metadata: - name: my-cassandra - namespace: rook-cassandra -spec: - ... - datacenter: - name: my-datacenter - racks: - - name: my-rack - members: 3 - jmxExporterConfigMapName: jmx-exporter-settings - storage: - volumeClaimTemplates: - - metadata: - name: rook-cassandra-data - spec: - storageClassName: my-storage-class - resources: - requests: - storage: 200Gi -``` - -Simple config map example to get all metrics: -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: jmx-exporter-settings - namespace: rook-cassandra -data: - jmx_exporter_config.yaml: | - lowercaseOutputLabelNames: true - lowercaseOutputName: true - whitelistObjectNames: ["org.apache.cassandra.metrics:*"] -``` - -ConfigMap's data field must contain `jmx_exporter_config.yaml` key with jmx exporter settings. - -There is no automatic reloading mechanism for pods when the config map updated. -After the configmap changed, you should restart all rack pods manually: - -```bash -NAMESPACE= -CLUSTER= -RACKS=$(kubectl get sts -n ${NAMESPACE} -l "cassandra.rook.io/cluster=${CLUSTER}") -echo ${RACKS} | xargs -n1 kubectl rollout restart -n ${NAMESPACE} -``` diff --git a/Documentation/ceph-advanced-configuration.md b/Documentation/ceph-advanced-configuration.md index c949e3d8d105..f2913f0d22c4 100644 --- a/Documentation/ceph-advanced-configuration.md +++ b/Documentation/ceph-advanced-configuration.md @@ -22,6 +22,7 @@ storage cluster. * [OSD Dedicated Network](#osd-dedicated-network) * [Phantom OSD Removal](#phantom-osd-removal) * [Change Failure Domain](#change-failure-domain) +* [Auto Expansion of OSDs](#auto-expansion-of-OSDs) ## Prerequisites @@ -78,14 +79,14 @@ When you create the second CephCluster CR, use the same `NAMESPACE` and the oper ## Use custom Ceph user and secret for mounting -> **NOTE**: For extensive info about creating Ceph users, consult the Ceph documentation: http://docs.ceph.com/docs/mimic/rados/operations/user-management/#add-a-user. +> **NOTE**: For extensive info about creating Ceph users, consult the Ceph documentation: https://docs.ceph.com/en/latest/rados/operations/user-management/#add-a-user. Using a custom Ceph user and secret can be done for filesystem and block storage. 
-Create a custom user in Ceph with read-write access in the `/bar` directory on CephFS (For Ceph Mimic or newer, use `data=POOL_NAME` instead of `pool=POOL_NAME`):
+Create a custom user in Ceph with read-write access in the `/bar` directory on CephFS:
 
 ```console
-$ ceph auth get-or-create-key client.user1 mon 'allow r' osd 'allow rw tag cephfs pool=YOUR_FS_DATA_POOL' mds 'allow r, allow rw path=/bar'
+$ ceph auth get-or-create-key client.user1 mon 'allow r' osd 'allow rw tag cephfs data=YOUR_FS_DATA_POOL' mds 'allow r, allow rw path=/bar'
 ```
 
 The command will return a Ceph secret key, this key should be added as a secret in Kubernetes like this:
@@ -109,7 +110,7 @@ mountSecret: ceph-user1-secret
 
 If you want the Rook Ceph agent to require a `mountUser` and `mountSecret` to be set in StorageClasses using Rook, you must set the environment variable `AGENT_MOUNT_SECURITY_MODE` to `Restricted` on the Rook Ceph operator Deployment.
 
-For more information on using the Ceph feature to limit access to CephFS paths, see [Ceph Documentation - Path Restriction](http://docs.ceph.com/docs/mimic/cephfs/client-auth/#path-restriction).
+For more information on using the Ceph feature to limit access to CephFS paths, see [Ceph Documentation - Path Restriction](https://docs.ceph.com/en/latest/cephfs/client-auth/#path-restriction).
 
 ### ClusterRole
 
@@ -590,3 +591,39 @@ ceph osd pool get replicapool crush_rule
 If the cluster's health was `HEALTH_OK` when we performed this change, immediately, the new rule is applied to the cluster transparently without service disruption.
 
 Exactly the same approach can be used to change from `host` back to `osd`.
+
+## Auto Expansion of OSDs
+
+### Prerequisites
+
+1) A [PVC-based cluster](ceph-cluster-crd.md#pvc-based-cluster) deployed in a dynamic provisioning environment with a `storageClassDeviceSet`.
+
+2) Create the Rook [Toolbox](ceph-toolbox.md).
+
+> Note: The [Prometheus Operator](ceph-monitoring.md#prometheus-operator) and [Prometheus Instances](ceph-monitoring.md#prometheus-instances) are prerequisites that are created by the auto-grow-storage script.
+
+### To scale OSDs Vertically
+
+Run the following script to auto-grow the size of OSDs on a PVC-based Rook-Ceph cluster whenever the OSDs have reached the storage near-full threshold.
+```console
+tests/scripts/auto-grow-storage.sh size --max maxSize --growth-rate percent
+```
+> The growth-rate percentage represents the percent increase you want in the OSD capacity, and maxSize represents the maximum disk size.
+
+For example, if you need to increase the size of the OSDs by 30% and the maximum disk size is 1Ti:
+```console
+./auto-grow-storage.sh size --max 1Ti --growth-rate 30
+```
+
+### To scale OSDs Horizontally
+
+Run the following script to auto-grow the number of OSDs on a PVC-based Rook-Ceph cluster whenever the OSDs have reached the storage near-full threshold.
+```console
+tests/scripts/auto-grow-storage.sh count --max maxCount --count rate
+```
+> The count represents the number of OSDs you need to add, and maxCount represents the maximum number of disks the storage cluster will support.
+
+For example, if you need to increase the number of OSDs by 3 and maxCount is 10:
+```console
+./auto-grow-storage.sh count --max 10 --count 3
+```
diff --git a/Documentation/ceph-block.md b/Documentation/ceph-block.md
index f3b184c95dea..62615a2231f3 100644
--- a/Documentation/ceph-block.md
+++ b/Documentation/ceph-block.md
@@ -11,7 +11,7 @@ Block storage allows a single pod to mount storage.
This guide shows how to crea ## Prerequisites -This guide assumes a Rook cluster as explained in the [Quickstart](ceph-quickstart.md). +This guide assumes a Rook cluster as explained in the [Quickstart](quickstart.md). ## Provision Storage @@ -83,6 +83,10 @@ parameters: # Delete the rbd volume when a PVC is deleted reclaimPolicy: Delete + +# Optional, if you want to add dynamic resize for PVC. Works for Kubernetes 1.14+ +# For now only ext3, ext4, xfs resize support provided, like in Kubernetes itself. +allowVolumeExpansion: true ``` If you've deployed the Rook operator in a namespace other than "rook-ceph", diff --git a/Documentation/ceph-client-crd.md b/Documentation/ceph-client-crd.md index 6cae6ffcd3da..7a8fccd67b63 100644 --- a/Documentation/ceph-client-crd.md +++ b/Documentation/ceph-client-crd.md @@ -44,4 +44,4 @@ spec: ### Prerequisites -This guide assumes you have created a Rook cluster as explained in the main [Quickstart guide](ceph-quickstart.md) +This guide assumes you have created a Rook cluster as explained in the main [Quickstart guide](quickstart.md) diff --git a/Documentation/ceph-cluster-crd.md b/Documentation/ceph-cluster-crd.md index 662012d1d27f..7a786ec51ca0 100755 --- a/Documentation/ceph-cluster-crd.md +++ b/Documentation/ceph-cluster-crd.md @@ -32,7 +32,7 @@ metadata: spec: cephVersion: # see the "Cluster Settings" section below for more details on which image of ceph to run - image: quay.io/ceph/ceph:v16.2.5 + image: quay.io/ceph/ceph:v16.2.6 dataDirHostPath: /var/lib/rook mon: count: 3 @@ -40,6 +40,7 @@ spec: storage: useAllNodes: true useAllDevices: true + onlyApplyOSDPlacement: false ``` ## PVC-based Cluster @@ -59,7 +60,7 @@ metadata: spec: cephVersion: # see the "Cluster Settings" section below for more details on which image of ceph to run - image: quay.io/ceph/ceph:v16.2.5 + image: quay.io/ceph/ceph:v16.2.6 dataDirHostPath: /var/lib/rook mon: count: 3 @@ -88,6 +89,7 @@ spec: volumeMode: Block accessModes: - ReadWriteOnce + onlyApplyOSDPlacement: false ``` For a more advanced scenario, such as adding a dedicated device you can refer to the [dedicated metadata device for OSD on PVC section](#dedicated-metadata-and-wal-device-for-osd-on-pvc). @@ -127,7 +129,7 @@ spec: - name: c cephVersion: # Stretch cluster is supported in Ceph Pacific or newer. - image: quay.io/ceph/ceph:v16.2.5 + image: quay.io/ceph/ceph:v16.2.6 allowUnsupported: true # Either storageClassDeviceSets or the storage section can be specified for creating OSDs. # This example uses all devices for simplicity. @@ -165,7 +167,7 @@ Settings can be specified at the global level to apply to the cluster as a whole * `external`: * `enable`: if `true`, the cluster will not be managed by Rook but via an external entity. This mode is intended to connect to an existing cluster. In this case, Rook will only consume the external cluster. However, Rook will be able to deploy various daemons in Kubernetes such as object gateways, mds and nfs if an image is provided and will refuse otherwise. If this setting is enabled **all** the other options will be ignored except `cephVersion.image` and `dataDirHostPath`. See [external cluster configuration](#external-cluster). If `cephVersion.image` is left blank, Rook will refuse the creation of extra CRs like object, file and nfs. * `cephVersion`: The version information for launching the ceph daemons. - * `image`: The image used for running the ceph daemons. For example, `quay.io/ceph/ceph:v15.2.12` or `v16.2.5`. 
For more details read the [container images section](#ceph-container-images). + * `image`: The image used for running the ceph daemons. For example, `quay.io/ceph/ceph:v15.2.12` or `v16.2.6`. For more details read the [container images section](#ceph-container-images). For the latest ceph images, see the [Ceph DockerHub](https://hub.docker.com/r/ceph/ceph/tags/). To ensure a consistent version of the image is running across all nodes in the cluster, it is recommended to use a very specific image version. Tags also exist that would give the latest version, but they are only recommended for test environments. For example, the tag `v14` will be updated each time a new nautilus build is released. @@ -216,7 +218,9 @@ For more details on the mons and when to choose a number other than `3`, see the * `config`: Config settings applied to all OSDs on the node unless overridden by `devices`. See the [config settings](#osd-configuration-settings) below. * [storage selection settings](#storage-selection-settings) * [Storage Class Device Sets](#storage-class-device-sets) -* `disruptionManagement`: The section for configuring management of daemon disruptions + * `onlyApplyOSDPlacement`: Whether the placement specific for OSDs is merged with the `all` placement. If `false`, the OSD placement will be merged with the `all` placement. If true, the `OSD placement will be applied` and the `all` placement will be ignored. The placement for OSDs is computed from several different places depending on the type of OSD: + - For non-PVCs: `placement.all` and `placement.osd` + - For PVCs: `placement.all` and inside the storageClassDeviceSet from the `placement` or `preparePlacement` * `managePodBudgets`: if `true`, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will block eviction of OSDs by default and unblock them safely when drains are detected. * `osdMaintenanceTimeout`: is a duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes. * `manageMachineDisruptionBudgets`: if `true`, the operator will create and manage MachineDisruptionBudgets to ensure OSDs are only fenced when the cluster is healthy. Only available on OpenShift. @@ -243,7 +247,12 @@ A specific will contain a specific release of Ceph as well as security fixes fro ### Mon Settings -* `count`: Set the number of mons to be started. The number must be odd and between `1` and `9`. If not specified the default is set to `3`. +* `count`: Set the number of mons to be started. The number must be between `1` and `9`. The recommended value is most commonly `3`. + For highest availability, an odd number of mons should be specified. + For higher durability in case of mon loss, an even number can be specified although availability may be lower. + To maintain quorum a majority of mons must be up. For example, if there are three mons, two must be up. + If there are four mons, three must be up. If there are two mons, both must be up. + If quorum is lost, see the [disaster recovery guide](ceph-disaster-recovery.md#restoring-mon-quorum) to restore quorum from a single mon. 
* `allowMultiplePerNode`: Whether to allow the placement of multiple mons on a single node. Default is `false` for production. Should only be set to `true` in test environments. * `volumeClaimTemplate`: A `PersistentVolumeSpec` used by Rook to create PVCs for monitor storage. This field is optional, and when not provided, HostPath @@ -340,6 +349,9 @@ Based on the configuration, the operator will do the following: \* Internal cluster traffic includes OSD heartbeats, data replication, and data recovery +Only OSD pods will have both Public and Cluster networks attached. The rest of the Ceph component pods and CSI pods will only have the Public network attached. +Rook Ceph Operator will not have any networks attached as it proxies the required commands via a sidecar container in the mgr pod. + In order to work, each selector value must match a `NetworkAttachmentDefinition` object name in Multus. For `multus` network provider, an already working cluster with Multus networking is required. Network attachment definition that later will be attached to the cluster needs to be created before the Cluster CRD. @@ -379,6 +391,10 @@ spec: * This format is required in order to use the NetworkAttachmentDefinition across namespaces. * In Openshift, to use a NetworkAttachmentDefinition (NAD) across namespaces, the NAD must be deployed in the `default` namespace. The NAD is then referenced with the namespace: `default/rook-public-nw` +#### Known issues with multus +When a CephFS/RBD volume is mounted in a Pod using cephcsi and then the CSI CephFS/RBD plugin is restarted or terminated (e.g. by restarting or deleting its DaemonSet), all operations on the volume become blocked, even after restarting the CSI pods. The only workaround is to restart the node where the cephcsi plugin pod was restarted. +This issue is tracked [here](https://github.com/rook/rook/issues/8085). + #### IPFamily Provide single-stack IPv4 or IPv6 protocol to assign corresponding addresses to pods and services. This field is optional. Possible inputs are IPv6 and IPv4. Empty value will be treated as IPv4. Kubernetes version should be at least v1.13 to run IPv6. Dual-stack is supported as of ceph Pacific. @@ -435,7 +451,7 @@ Below are the settings for host-based cluster. This type of cluster can specify * `config`: Device-specific config settings. See the [config settings](#osd-configuration-settings) below Host-based cluster only supports raw device and partition. Be sure to see the -[Ceph quickstart doc prerequisites](ceph-quickstart.md#prerequisites) for additional considerations. +[Ceph quickstart doc prerequisites](quickstart.md#prerequisites) for additional considerations. Below are the settings for a PVC-based cluster. @@ -487,15 +503,9 @@ The following storage selection settings are specific to Ceph and do not apply t * `initialWeight`: The initial OSD weight in TiB units. By default, this value is derived from OSD's capacity. * `primaryAffinity`: The [primary-affinity](https://docs.ceph.com/en/latest/rados/operations/crush-map/#primary-affinity) value of an OSD, within range `[0, 1]` (default: `1`). * `osdsPerDevice`**: The number of OSDs to create on each device. High performance devices such as NVMe can handle running multiple OSDs. If desired, this can be overridden for each node and each device. -* `encryptedDevice`**: Encrypt OSD volumes using dmcrypt ("true" or "false"). By default this option is disabled. 
See [encryption](http://docs.ceph.com/docs/nautilus/ceph-volume/lvm/encryption/) for more information on encryption in Ceph. +* `encryptedDevice`**: Encrypt OSD volumes using dmcrypt ("true" or "false"). By default this option is disabled. See [encryption](http://docs.ceph.com/docs/master/ceph-volume/lvm/encryption/) for more information on encryption in Ceph. * `crushRoot`: The value of the `root` CRUSH map label. The default is `default`. Generally, you should not need to change this. However, if any of your topology labels may have the value `default`, you need to change `crushRoot` to avoid conflicts, since CRUSH map values need to be unique. -**NOTE**: Depending on the Ceph image running in your cluster, OSDs will be configured differently. Newer images will configure OSDs with `ceph-volume`, which provides support for `osdsPerDevice`, `encryptedDevice`, as well as other features that will be exposed in future Rook releases. OSDs created prior to Rook v0.9 or with older images of Luminous and Mimic are not created with `ceph-volume` and thus would not support the same features. For `ceph-volume`, the following images are supported: - -* Luminous 12.2.10 or newer -* Mimic 13.2.3 or newer -* Nautilus - ### Annotations and Labels Annotations and Labels can be specified so that the Rook components will have those annotations / labels added to them. @@ -507,6 +517,8 @@ You can set annotations / labels for Rook components for the list of key value p * `mon`: Set annotations / labels for mons * `osd`: Set annotations / labels for OSDs * `prepareosd`: Set annotations / labels for OSD Prepare Jobs +* `monitoring`: Set annotations / labels for service monitor +* `crashcollector`: Set annotations / labels for crash collectors When other keys are set, `all` will be merged together with the specific component. 
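+
+For example, a minimal sketch of adding an annotation to only the service monitor via a patch (assuming a CephCluster named `rook-ceph` in the `rook-ceph` namespace; the annotation key and value are only illustrative):
+
+```console
+kubectl -n rook-ceph patch cephcluster rook-ceph --type merge \
+  -p '{"spec":{"annotations":{"monitoring":{"example.com/team":"storage"}}}}'
+```
+
+The same keys can instead be set directly in `cluster.yaml` before deploying the cluster.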
### Placement Configuration Settings @@ -675,8 +687,8 @@ kubectl -n rook-ceph get CephCluster -o yaml deviceClasses: - name: hdd version: - image: quay.io/ceph/ceph:v16.2.5 - version: 16.2.5-0 + image: quay.io/ceph/ceph:v16.2.6 + version: 16.2.6-0 conditions: - lastHeartbeatTime: "2021-03-02T21:22:11Z" lastTransitionTime: "2021-03-02T21:21:09Z" @@ -737,7 +749,7 @@ metadata: namespace: rook-ceph spec: cephVersion: - image: quay.io/ceph/ceph:v16.2.5 + image: quay.io/ceph/ceph:v16.2.6 dataDirHostPath: /var/lib/rook mon: count: 3 @@ -769,7 +781,7 @@ metadata: namespace: rook-ceph spec: cephVersion: - image: quay.io/ceph/ceph:v16.2.5 + image: quay.io/ceph/ceph:v16.2.6 dataDirHostPath: /var/lib/rook mon: count: 3 @@ -809,7 +821,7 @@ metadata: namespace: rook-ceph spec: cephVersion: - image: quay.io/ceph/ceph:v16.2.5 + image: quay.io/ceph/ceph:v16.2.6 dataDirHostPath: /var/lib/rook mon: count: 3 @@ -856,7 +868,7 @@ metadata: namespace: rook-ceph spec: cephVersion: - image: quay.io/ceph/ceph:v16.2.5 + image: quay.io/ceph/ceph:v16.2.6 dataDirHostPath: /var/lib/rook mon: count: 3 @@ -962,7 +974,7 @@ metadata: namespace: rook-ceph spec: cephVersion: - image: quay.io/ceph/ceph:v16.2.5 + image: quay.io/ceph/ceph:v16.2.6 dataDirHostPath: /var/lib/rook mon: count: 3 @@ -1008,7 +1020,7 @@ spec: requests: storage: 10Gi cephVersion: - image: quay.io/ceph/ceph:v16.2.5 + image: quay.io/ceph/ceph:v16.2.6 allowUnsupported: false dashboard: enabled: true @@ -1037,7 +1049,7 @@ spec: operator: In values: - cluster1 - topologyKey: "topology.kubernetes.io/zone" + topologyKey: "topology.kubernetes.io/zone" volumeClaimTemplates: - metadata: name: data @@ -1466,7 +1478,7 @@ spec: enable: true dataDirHostPath: /var/lib/rook cephVersion: - image: quay.io/ceph/ceph:v16.2.5 # Should match external cluster version + image: quay.io/ceph/ceph:v16.2.6 # Should match external cluster version ``` ### Security diff --git a/Documentation/ceph-common-issues.md b/Documentation/ceph-common-issues.md index 0db2b831002f..2eb14de80164 100644 --- a/Documentation/ceph-common-issues.md +++ b/Documentation/ceph-common-issues.md @@ -25,12 +25,12 @@ If after trying the suggestions found on this page and the problem is not resolv * [Using multiple shared filesystem (CephFS) is attempted on a kernel version older than 4.7](#using-multiple-shared-filesystem-cephfs-is-attempted-on-a-kernel-version-older-than-47) * [Set debug log level for all Ceph daemons](#set-debug-log-level-for-all-ceph-daemons) * [Activate log to file for a particular Ceph daemon](#activate-log-to-file-for-a-particular-ceph-daemon) -* [Flex storage class versus Ceph CSI storage class](#flex-storage-class-versus-ceph-csi-storage-class) * [A worker node using RBD devices hangs up](#a-worker-node-using-rbd-devices-hangs-up) * [Too few PGs per OSD warning is shown](#too-few-pgs-per-osd-warning-is-shown) * [LVM metadata can be corrupted with OSD on LV-backed PVC](#lvm-metadata-can-be-corrupted-with-osd-on-lv-backed-pvc) * [OSD prepare job fails due to low aio-max-nr setting](#osd-prepare-job-fails-due-to-low-aio-max-nr-setting) * [Failed to create CRDs](#failed-to-create-crds) +* [Unexpected partitions created](#unexpected-partitions-created) See also the [CSI Troubleshooting Guide](ceph-csi-troubleshooting.md). @@ -73,7 +73,7 @@ Here are some common commands to troubleshoot a Ceph cluster: The first two status commands provide the overall cluster health. The normal state for cluster operations is HEALTH_OK, but will still function when the state is in a HEALTH_WARN state. 
If you are in a WARN state, then the cluster is in a condition that it may enter the HEALTH_ERROR state at which point *all* disk I/O operations are halted. If a HEALTH_WARN state is observed, then one should take action to prevent the cluster from halting when it enters the HEALTH_ERROR state. -There are many Ceph sub-commands to look at and manipulate Ceph objects, well beyond the scope this document. See the [Ceph documentation](https://docs.ceph.com/) for more details of gathering information about the health of the cluster. In addition, there are other helpful hints and some best practices located in the [Advanced Configuration section](advanced-configuration.md). Of particular note, there are scripts for collecting logs and gathering OSD information there. +There are many Ceph sub-commands to look at and manipulate Ceph objects, well beyond the scope this document. See the [Ceph documentation](https://docs.ceph.com/) for more details of gathering information about the health of the cluster. In addition, there are other helpful hints and some best practices located in the [Advanced Configuration section](ceph-advanced-configuration.md). Of particular note, there are scripts for collecting logs and gathering OSD information there. ## Pod Using Ceph Storage Is Not Running @@ -92,7 +92,7 @@ If you see that the PVC remains in **pending** state, see the topic [PVCs stay i ### Possible Solutions Summary -* `rook-ceph-agent` pod is in a `CrashLoopBackOff` status because it cannot deploy its driver on a read-only filesystem: [Flexvolume configuration pre-reqs](./ceph-prerequisites.md#ceph-flexvolume-configuration) +* `rook-ceph-agent` pod is in a `CrashLoopBackOff` status because it cannot deploy its driver on a read-only filesystem: [Flexvolume configuration pre-reqs](./prerequisites.md#ceph-flexvolume-configuration) * Persistent Volume and/or Claim are failing to be created and bound: [Volume Creation](#volume-creation) * `rook-ceph-agent` pod is failing to mount and format the volume: [Rook Agent Mounting](#volume-mounting) @@ -165,7 +165,7 @@ First, clean up the agent deployment with: kubectl -n rook-ceph delete daemonset rook-ceph-agent ``` -Once the `rook-ceph-agent` pods are gone, **follow the instructions in the [Flexvolume configuration pre-reqs](./ceph-prerequisites.md#ceph-flexvolume-configuration)** to ensure a good value for `--volume-plugin-dir` has been provided to the Kubelet. +Once the `rook-ceph-agent` pods are gone, **follow the instructions in the [Flexvolume configuration pre-reqs](./prerequisites.md#ceph-flexvolume-configuration)** to ensure a good value for `--volume-plugin-dir` has been provided to the Kubelet. After that has been configured, and the Kubelet has been restarted, start the agent pods up again by restarting `rook-operator`: ```console @@ -824,7 +824,6 @@ They are cases where looking at Kubernetes logs is not enough for diverse reason So for each daemon, `dataDirHostPath` is used to store logs, if logging is activated. Rook will bindmount `dataDirHostPath` for every pod. -As of Ceph Nautilus 14.2.1, it is possible to enable logging for a particular daemon on the fly. Let's say you want to enable logging for `mon.a`, but only for this daemon. Using the toolbox or from inside the operator run: @@ -837,24 +836,6 @@ You don't need to restart the pod, the effect will be immediate. To disable the logging on file, simply set `log_to_file` to `false`. 
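+
+As a sketch, run from the toolbox (`mon.a` is only an example daemon name):
+
+```console
+# enable logging to file for mon.a only
+ceph config set mon.a log_to_file true
+
+# turn it back off
+ceph config set mon.a log_to_file false
+```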
-For Ceph Luminous/Mimic releases, `mon_cluster_log_file` and `cluster_log_file` can be set to -`/var/log/ceph/XXXX` in the config override ConfigMap to enable logging. See the (Advanced -Documentation)[Documentation/advanced-configuration.md#kubernetes] for information about how to use -the config override ConfigMap. - -For Ceph Luminous/Mimic releases, `mon_cluster_log_file` and `cluster_log_file` can be set to `/var/log/ceph/XXXX` in the config override ConfigMap to enable logging. See the [Advanced Documentation](#custom-cephconf-settings) for information about how to use the config override ConfigMap. - -## Flex storage class versus Ceph CSI storage class - -Since Rook 1.1, Ceph CSI has become stable and moving forward is the ultimate replacement over the Flex driver. -However, not all Flex storage classes are available through Ceph CSI since it's basically catching up on features. -Ceph CSI in its 1.2 version (with Rook 1.1) does not support the Erasure coded pools storage class. - -So, if you are looking at using such storage class you should enable the Flex driver by setting `ROOK_ENABLE_FLEX_DRIVER: true` in your `operator.yaml`. -Also, if you are in the need of specific features and wonder if CSI is capable of handling them, you should read [the ceph-csi support matrix](https://github.com/ceph/ceph-csi#support-matrix). - -See also the [CSI Troubleshooting Guide](ceph-csi-troubleshooting.md). - ## A worker node using RBD devices hangs up ### Symptoms diff --git a/Documentation/ceph-configuration.md b/Documentation/ceph-configuration.md index a379258ab720..751337cffd15 100644 --- a/Documentation/ceph-configuration.md +++ b/Documentation/ceph-configuration.md @@ -29,11 +29,7 @@ of OSDs the user expects to have backing each pool. The Ceph [OSD and Pool confi docs](https://docs.ceph.com/docs/master/rados/operations/placement-groups/#a-preselection-of-pg-num) provide detailed information about how to tune these parameters: `osd_pool_default_pg_num` and `osd_pool_default_pgp_num`. -Pools created prior to v1.1 will have a default PG count of 100. Pools created after v1.1 -will have Ceph's default PG count. - -An easier option exists for Rook-Ceph clusters running Ceph Nautilus (v14.2.x) or newer. Nautilus -[introduced the PG auto-scaler mgr module](https://ceph.com/rados/new-in-nautilus-pg-merging-and-autotuning/) +Nautilus [introduced the PG auto-scaler mgr module](https://ceph.com/rados/new-in-nautilus-pg-merging-and-autotuning/) capable of automatically managing PG and PGP values for pools. Please see [Ceph New in Nautilus: PG merging and autotuning](https://ceph.io/rados/new-in-nautilus-pg-merging-and-autotuning/) for more information about this module. @@ -49,7 +45,7 @@ spec: enabled: true ``` -In Octopus (v15.2.x), this module is enabled by default without the above-mentioned setting. +In Octopus (v15.2.x) and newer, this module is enabled by default without the above-mentioned setting. With that setting, the autoscaler will be enabled for all new pools. 
If you do not desire to have the autoscaler enabled for all new pools, you will need to use the Rook toolbox to enable the module diff --git a/Documentation/ceph-csi-drivers.md b/Documentation/ceph-csi-drivers.md index ccc3ba244d8b..cf6572b36ce2 100644 --- a/Documentation/ceph-csi-drivers.md +++ b/Documentation/ceph-csi-drivers.md @@ -3,7 +3,7 @@ title: Ceph CSI weight: 3200 indent: true --- - +{% include_relative branch.liquid %} # Ceph CSI Drivers There are two CSI drivers integrated with Rook that will enable different scenarios: @@ -19,6 +19,10 @@ For documentation on consuming the storage: * RBD: See the [Block Storage](ceph-block.md) topic * CephFS: See the [Shared Filesystem](ceph-filesystem.md) topic +## Supported Versions +The supported Ceph CSI version is 3.3.0 or greater with Rook. Refer to ceph csi [releases](https://github.com/ceph/ceph-csi/releases) +for more information. + ## Static Provisioning Both drivers also support the creation of static PV and static PVC from existing RBD image/CephFS volume. Refer to [static PVC](https://github.com/ceph/ceph-csi/blob/devel/docs/static-pvc.md) for more information. @@ -73,8 +77,52 @@ PVC will be updated to new size. ## RBD Mirroring To support RBD Mirroring, the [Volume Replication Operator](https://github.com/csi-addons/volume-replication-operator/blob/main/README.md) will be started in the RBD provisioner pod. -Volume Replication Operator is a kubernetes operator that provides common and reusable APIs for storage disaster recovery. It is based on [csi-addons/spec](https://github.com/csi-addons/spec) specification and can be used by any storage provider. -It follows controller pattern and provides extended APIs for storage disaster recovery. The extended APIs are provided via Custom Resource Definition (CRD). -To enable volume replication: -- For Helm deployments see the [helm settings](helm-operator.md#configuration). -- For non-Helm deployments set `CSI_ENABLE_VOLUME_REPLICATION: "true"` in the operator.yaml +The Volume Replication Operator is a kubernetes operator that provides common and reusable APIs for storage disaster recovery. It is based on [csi-addons/spec](https://github.com/csi-addons/spec) specification and can be used by any storage provider. +It follows the controller pattern and provides extended APIs for storage disaster recovery. The extended APIs are provided via Custom Resource Definitions (CRDs). + +### Enable volume replication + +1. Install the volume replication CRDs: + +```console +kubectl create -f https://raw.githubusercontent.com/csi-addons/volume-replication-operator/v0.1.0/config/crd/bases/replication.storage.openshift.io_volumereplications.yaml +kubectl create -f https://raw.githubusercontent.com/csi-addons/volume-replication-operator/v0.1.0/config/crd/bases/replication.storage.openshift.io_volumereplicationclasses.yaml +``` + +2. Enable the volume replication controller: + - For Helm deployments see the [csi.volumeReplication.enabled setting](helm-operator.md#configuration). + - For non-Helm deployments set `CSI_ENABLE_VOLUME_REPLICATION: "true"` in operator.yaml + +## Ephemeral volume support + +The generic ephemeral volume feature adds support for specifying PVCs in the +`volumes` field to indicate a user would like to create a Volume as part of the pod spec. +This feature requires the GenericEphemeralVolume feature gate to be enabled. + +For example: + +```yaml +kind: Pod +apiVersion: v1 +... 
+  volumes:
+    - name: mypvc
+      ephemeral:
+        volumeClaimTemplate:
+          spec:
+            accessModes: ["ReadWriteOnce"]
+            storageClassName: "rook-ceph-block"
+            resources:
+              requests:
+                storage: 1Gi
+```
+
+A volume claim template is defined inside the pod spec, and the volume it
+describes shares the pod's lifecycle. The volume is provisioned
+when the pod is spawned and destroyed when the pod is deleted.
+
+Refer to the [ephemeral volume documentation](https://kubernetes.io/docs/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes) for more info.
+Also, see the example manifests for an [RBD ephemeral volume](https://github.com/rook/rook/tree/{{ branchName }}/cluster/examples/kubernetes/ceph/csi/rbd/pod-ephemeral.yaml) and a [CephFS ephemeral volume](https://github.com/rook/rook/tree/{{ branchName }}/cluster/examples/kubernetes/ceph/csi/cephfs/pod-ephemeral.yaml).
+
+### Prerequisites
+Kubernetes version 1.21 or greater is required.
diff --git a/Documentation/ceph-csi-troubleshooting.md b/Documentation/ceph-csi-troubleshooting.md
index d27abee4feab..34507c47f8e1 100644
--- a/Documentation/ceph-csi-troubleshooting.md
+++ b/Documentation/ceph-csi-troubleshooting.md
@@ -441,3 +441,57 @@ $ rbd ls --id=csi-rbd-node -m=10.111.136.166:6789 --key=AQDpIQhg+v83EhAAgLboWIbl
 ```
 
 Where `-m` is one of the mon endpoints and the `--key` is the key used by the CSI driver for accessing the Ceph cluster.
+
+## Node Loss
+
+When a node is lost, you will see application pods on the node stuck in the `Terminating` state while another pod is rescheduled and is in the `ContainerCreating` state.
+
+To allow the application pod to start on another node, force delete the pod.
+
+### Force deleting the pod
+
+To force delete the pod stuck in the `Terminating` state:
+
+```console
+$ kubectl -n rook-ceph delete pod my-app-69cd495f9b-nl6hf --grace-period 0 --force
+```
+
+After the force delete, wait for a timeout of about 8-10 minutes. If the pod is still not in the Running state, continue with the next section to blocklist the node.
+
+### Blocklisting a node
+
+To shorten the timeout, you can mark the node as "blocklisted" from the [Rook toolbox](ceph-toolbox.md) so Rook can safely fail over the pod sooner.
+
+If the Ceph version is at least Pacific (v16.2.x), run the following command:
+
+```console
+$ ceph osd blocklist add <NODE_IP> # get the node IP you want to blocklist
+blocklisting <NODE_IP>
+```
+
+If the Ceph version is Octopus (v15.2.x) or older, run the following command:
+
+```console
+$ ceph osd blacklist add <NODE_IP> # get the node IP you want to blacklist
+blacklisting <NODE_IP>
+```
+
+After running the above command, the pod will be running within a few minutes.
+
+### Removing a node blocklist
+
+After you are absolutely sure the node is permanently offline and that the node no longer needs to be blocklisted, remove the node from the blocklist.
+
+If the Ceph version is at least Pacific (v16.2.x), run:
+
+```console
+$ ceph osd blocklist rm <NODE_IP>
+un-blocklisting <NODE_IP>
+```
+
+If the Ceph version is Octopus (v15.2.x) or older, run:
+
+```console
+$ ceph osd blacklist rm <NODE_IP> # get the node IP you want to remove from the blacklist
+un-blacklisting <NODE_IP>
+```
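+
+As a concrete sketch (assuming the toolbox deployment is named `rook-ceph-tools` and using a hypothetical node name and IP), the node IP can be looked up and then blocklisted from the toolbox:
+
+```console
+# find the InternalIP of the lost node (node name is hypothetical)
+kubectl get node lost-node-1 -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}'
+
+# blocklist that IP from the toolbox (Ceph Pacific or newer)
+kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph osd blocklist add 10.0.0.12
+```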
Rook creates a default user named -`admin` and generates a secret called `rook-ceph-dashboard-admin-password` in the namespace where the Rook Ceph cluster is running. +`admin` and generates a secret called `rook-ceph-dashboard-password` in the namespace where the Rook Ceph cluster is running. To retrieve the generated password, you can run the following: ```console diff --git a/Documentation/ceph-filesystem.md b/Documentation/ceph-filesystem.md index 0988ae4dd144..782544db3911 100644 --- a/Documentation/ceph-filesystem.md +++ b/Documentation/ceph-filesystem.md @@ -13,7 +13,7 @@ This example runs a shared filesystem for the [kube-registry](https://github.com ## Prerequisites -This guide assumes you have created a Rook cluster as explained in the main [Kubernetes guide](ceph-quickstart.md) +This guide assumes you have created a Rook cluster as explained in the main [Kubernetes guide](quickstart.md) ### Multiple Filesystems Support @@ -91,7 +91,8 @@ metadata: # Change "rook-ceph" provisioner prefix to match the operator namespace if needed provisioner: rook-ceph.cephfs.csi.ceph.com parameters: - # clusterID is the namespace where operator is deployed. + # clusterID is the namespace where the rook cluster is running + # If you change this namespace, also change the namespace below where the secret namespaces are defined clusterID: rook-ceph # CephFS filesystem name into which the volume shall be created diff --git a/Documentation/ceph-fs-mirror-crd.md b/Documentation/ceph-fs-mirror-crd.md index 192ea65009d8..4d1c13cf47ff 100644 --- a/Documentation/ceph-fs-mirror-crd.md +++ b/Documentation/ceph-fs-mirror-crd.md @@ -3,9 +3,10 @@ title: FilesystemMirror CRD weight: 3600 indent: true --- + {% include_relative branch.liquid %} -This guide assumes you have created a Rook cluster as explained in the main [Quickstart guide](ceph-quickstart.md) +This guide assumes you have created a Rook cluster as explained in the main [Quickstart guide](quickstart.md) # Ceph FilesystemMirror CRD @@ -26,75 +27,23 @@ metadata: namespace: rook-ceph ``` - -## Configuring mirroring peers - -On an external site you want to mirror with, you need to create a bootstrap peer token. -The token will be used by one site to **pull** images from the other site. -The following assumes the name of the pool is "test" and the site name "europe" (just like the region), so we will be pulling images from this site: - -```console -external-cluster-console # ceph fs snapshot mirror peer_bootstrap create myfs2 client.mirror europe -{"token": "eyJmc2lkIjogIjgyYjdlZDkyLTczYjAtNGIyMi1hOGI3LWVkOTQ4M2UyODc1NiIsICJmaWxlc3lzdGVtIjogIm15ZnMyIiwgInVzZXIiOiAiY2xpZW50Lm1pcnJvciIsICJzaXRlX25hbWUiOiAidGVzdCIsICJrZXkiOiAiQVFEVVAxSmdqM3RYQVJBQWs1cEU4cDI1ZUhld2lQK0ZXRm9uOVE9PSIsICJtb25faG9zdCI6ICJbdjI6MTAuOTYuMTQyLjIxMzozMzAwLHYxOjEwLjk2LjE0Mi4yMTM6Njc4OV0sW3YyOjEwLjk2LjIxNy4yMDc6MzMwMCx2MToxMC45Ni4yMTcuMjA3OjY3ODldLFt2MjoxMC45OS4xMC4xNTc6MzMwMCx2MToxMC45OS4xMC4xNTc6Njc4OV0ifQ=="} -``` - -For more details, refer to the official ceph-fs mirror documentation on [how to create a bootstrap peer](https://docs.ceph.com/en/latest/dev/cephfs-mirroring/#bootstrap-peers). - -When the peer token is available, you need to create a Kubernetes Secret, it can named anything. 
-Our `europe-cluster-peer-fs-test-1` will have to be created manually, like so: - -```console -$ kubectl -n rook-ceph create secret generic "europe-cluster-peer-fs-test-1" \ ---from-literal=token=eyJmc2lkIjogIjgyYjdlZDkyLTczYjAtNGIyMi1hOGI3LWVkOTQ4M2UyODc1NiIsICJmaWxlc3lzdGVtIjogIm15ZnMyIiwgInVzZXIiOiAiY2xpZW50Lm1pcnJvciIsICJzaXRlX25hbWUiOiAidGVzdCIsICJrZXkiOiAiQVFEVVAxSmdqM3RYQVJBQWs1cEU4cDI1ZUhld2lQK0ZXRm9uOVE9PSIsICJtb25faG9zdCI6ICJbdjI6MTAuOTYuMTQyLjIxMzozMzAwLHYxOjEwLjk2LjE0Mi4yMTM6Njc4OV0sW3YyOjEwLjk2LjIxNy4yMDc6MzMwMCx2MToxMC45Ni4yMTcuMjA3OjY3ODldLFt2MjoxMC45OS4xMC4xNTc6MzMwMCx2MToxMC45OS4xMC4xNTc6Njc4OV0ifQ== -``` - -Rook will read a `token` key of the Data content of the Secret. - -You can now create the mirroring CR: - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephFilesystemMirror -metadata: - name: my-fs-mirror - namespace: rook-ceph -spec: - peers: - secretNames: - - "europe-cluster-peer-pool-test-1" -``` - -You can add more filesystems by repeating the above and changing the "token" value of the Kubernetes Secret. -So the list might eventually look like: - -```yaml - peers: - secretNames: - - "europe-cluster-peer-fs-test-1" - - "europe-cluster-peer-fs-test-2" - - "europe-cluster-peer-fs-test-3" -``` - -Along with three Kubernetes Secret. - - ## Settings If any setting is unspecified, a suitable default will be used automatically. ### FilesystemMirror metadata -* `name`: The name that will be used for the Ceph cephfs-mirror daemon. -* `namespace`: The Kubernetes namespace that will be created for the Rook cluster. The services, pods, and other resources created by the operator will be added to this namespace. +- `name`: The name that will be used for the Ceph cephfs-mirror daemon. +- `namespace`: The Kubernetes namespace that will be created for the Rook cluster. The services, pods, and other resources created by the operator will be added to this namespace. ### FilesystemMirror Settings -* `peers`: to configure mirroring peers - * `secretNames`: a list of peers to connect to. Currently (Ceph Pacific release) **only a single** peer is supported where a peer represents a Ceph cluster. - However, if you want to enable mirroring of multiple filesystems, you would have to have **one Secret per filesystem**. -* `placement`: The cephfs-mirror pods can be given standard Kubernetes placement restrictions with `nodeAffinity`, `tolerations`, `podAffinity`, and `podAntiAffinity` similar to placement defined for daemons configured by the [cluster CRD](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/cluster.yaml). -* `annotations`: Key value pair list of annotations to add. -* `labels`: Key value pair list of labels to add. -* `resources`: The resource requirements for the cephfs-mirror pods. -* `priorityClassName`: The priority class to set on the cephfs-mirror pods. +- `placement`: The cephfs-mirror pods can be given standard Kubernetes placement restrictions with `nodeAffinity`, `tolerations`, `podAffinity`, and `podAntiAffinity` similar to placement defined for daemons configured by the [cluster CRD](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/cluster.yaml). +- `annotations`: Key value pair list of annotations to add. +- `labels`: Key value pair list of labels to add. +- `resources`: The resource requirements for the cephfs-mirror pods. +- `priorityClassName`: The priority class to set on the cephfs-mirror pods. 
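+
+As a sketch, the optional scheduling and resource settings above can be combined in a
+single manifest like the one below. The node label (`role: storage-node`), the resource
+values, and the priority class name are illustrative examples, not defaults:
+
+```yaml
+apiVersion: ceph.rook.io/v1
+kind: CephFilesystemMirror
+metadata:
+  name: my-fs-mirror
+  namespace: rook-ceph
+spec:
+  # schedule the cephfs-mirror pod only on nodes carrying an example label
+  placement:
+    nodeAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        nodeSelectorTerms:
+          - matchExpressions:
+              - key: role
+                operator: In
+                values:
+                  - storage-node
+  # illustrative resource requests/limits for the cephfs-mirror pod
+  resources:
+    requests:
+      cpu: "500m"
+      memory: "512Mi"
+    limits:
+      memory: "1Gi"
+  priorityClassName: system-cluster-critical
+```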
+ +## Configuring mirroring peers +In order to configure mirroring peers, please refer to the [CephFilesystem documentation](ceph-filesystem-crd.md#mirroring). diff --git a/Documentation/ceph-mon-health.md b/Documentation/ceph-mon-health.md index f67cb5e654db..0ae91a30b285 100644 --- a/Documentation/ceph-mon-health.md +++ b/Documentation/ceph-mon-health.md @@ -34,9 +34,9 @@ quorum and perform operations in the cluster. If the majority of mons are not ru Most commonly a cluster will have three mons. This would mean that one mon could go down and allow the cluster to remain healthy. You would still have 2/3 mons running to give you consensus in the cluster for any operation. -You will always want an odd number of mons. Fifty percent of mons will not be sufficient to maintain quorum. If you had two mons and one +For highest availability, an odd number of mons is required. Fifty percent of mons will not be sufficient to maintain quorum. If you had two mons and one of them went down, you would have 1/2 of quorum. Since that is not a super-majority, the cluster would have to wait until the second mon is up again. -Therefore, Rook prohibits an even number of mons. +Rook allows an even number of mons for higher durability. See the [disaster recovery guide](ceph-disaster-recovery.md#restoring-mon-quorum) if quorum is lost and to recover mon quorum from a single mon. The number of mons to create in a cluster depends on your tolerance for losing a node. If you have 1 mon zero nodes can be lost to maintain quorum. With 3 mons one node can be lost, and with 5 mons two nodes can be lost. Because the Rook operator will automatically diff --git a/Documentation/ceph-monitoring.md b/Documentation/ceph-monitoring.md index 9e8cca274c1b..41078bf51cfc 100644 --- a/Documentation/ceph-monitoring.md +++ b/Documentation/ceph-monitoring.md @@ -38,7 +38,7 @@ With the Prometheus operator running, we can create a service monitor that will From the root of your locally cloned Rook repo, go the monitoring directory: ```console -$ git clone --single-branch --branch {{ branchName }} https://github.com/rook/rook.git +$ git clone --single-branch --branch v1.7.8 https://github.com/rook/rook.git cd rook/cluster/examples/kubernetes/ceph/monitoring ``` @@ -144,6 +144,18 @@ The following Grafana dashboards are available: * [Ceph - OSD (Single)](https://grafana.com/dashboards/5336) * [Ceph - Pools](https://grafana.com/dashboards/5342) +## Updates and Upgrades + +When updating Rook, there may be updates to RBAC for monitoring. It is easy to apply the changes +with each update or upgrade. This should be done at the same time you update Rook common resources +like `common.yaml`. + +```console +kubectl apply -f cluster/examples/kubernetes/ceph/monitoring/rbac.yaml +``` + +> This is updated automatically if you are upgrading via the helm chart + ## Teardown To clean up all the artifacts created by the monitoring walk-through, copy/paste the entire block below (note that errors about resources "not found" can be ignored): @@ -196,4 +208,32 @@ spec: labels: monitoring: prometheus: k8s -[...] \ No newline at end of file +[...] +``` + +### Horizontal Pod Scaling using Kubernetes Event-driven Autoscaling (KEDA) + +Using metrics exported from the Prometheus service, the horizontal pod scaling can use the custom metrics other than CPU and memory consumption. It can be done with help of Prometheus Scaler provided by the [KEDA](https://keda.sh/docs/2.4/scalers/prometheus/). 
See the [KEDA deployment guide](https://keda.sh/docs/2.4/deploy/) for details. + +Following is an example to autoscale RGW: +```YAML +apiVersion: keda.k8s.io/v1alpha1 +kind: ScaledObject +metadata: + name: rgw-scale + namespace: rook-ceph +spec: + scaleTargetRef: + kind: Deployment + deploymentName: rook-ceph-rgw-my-store-a # deployment for the autoscaling + minReplicaCount: 1 + maxReplicaCount: 5 + triggers: + - type: prometheus + metadata: + serverAddress: http://rook-prometheus.rook-ceph.svc:9090 + metricName: collecting_ceph_rgw_put + query: | + sum(rate(ceph_rgw_put[2m])) # promethues query used for autoscaling + threshold: "90" +``` diff --git a/Documentation/ceph-nfs-crd.md b/Documentation/ceph-nfs-crd.md index 9b10aa47fd12..76b3879b9be4 100644 --- a/Documentation/ceph-nfs-crd.md +++ b/Documentation/ceph-nfs-crd.md @@ -25,6 +25,8 @@ metadata: name: my-nfs namespace: rook-ceph spec: + # rados property is not used in versions of Ceph equal to or greater than + # 16.2.7, see note in RADOS settings section below. rados: # RADOS pool where NFS client recovery data and per-daemon configs are # stored. In this example the data pool for the "myfs" filesystem is used. @@ -88,9 +90,12 @@ ceph dashboard set-ganesha-clusters-rados-pool-namespace : **NOTE**: The RADOS settings aren't used in Ceph versions equal to or greater than Pacific 16.2.7, default values are used instead ".nfs" for the RADOS pool and the CephNFS CR's name for the RADOS namespace. However, RADOS settings are mandatory for versions preceding Pacific 16.2.7. + > **NOTE**: Don't use EC pools for NFS because ganesha uses omap in the recovery objects and grace db. EC pools do not support omap. ## EXPORT Block Configuration diff --git a/Documentation/ceph-object-multisite.md b/Documentation/ceph-object-multisite.md index 97e397141c88..db9061b0ab77 100644 --- a/Documentation/ceph-object-multisite.md +++ b/Documentation/ceph-object-multisite.md @@ -22,7 +22,7 @@ To review core multisite concepts please read the [ceph-multisite design overvie ## Prerequisites -This guide assumes a Rook cluster as explained in the [Quickstart](ceph-quickstart.md). +This guide assumes a Rook cluster as explained in the [Quickstart](quickstart.md). # Creating Object Multisite diff --git a/Documentation/ceph-object-store-crd.md b/Documentation/ceph-object-store-crd.md index ee7c40fca737..c1e1422fa554 100644 --- a/Documentation/ceph-object-store-crd.md +++ b/Documentation/ceph-object-store-crd.md @@ -37,6 +37,7 @@ spec: preservePoolsOnDelete: true gateway: # sslCertificateRef: + # caBundleRef: port: 80 # securePort: 443 instances: 1 @@ -91,7 +92,21 @@ When the `zone` section is set pools with the object stores name will not be cre The gateway settings correspond to the RGW daemon settings. * `type`: `S3` is supported -* `sslCertificateRef`: If specified, this is the name of the Kubernetes secret(`opaque` or `tls` type) that contains the TLS certificate to be used for secure connections to the object store. Rook will look in the secret provided at the `cert` key name. The value of the `cert` key must be in the format expected by the [RGW service](https://docs.ceph.com/docs/master/install/ceph-deploy/install-ceph-gateway/#using-ssl-with-civetweb): "The server key, server certificate, and any other CA or intermediate certificates be supplied in one file. Each of these items must be in PEM form." 
+* `sslCertificateRef`: If specified, this is the name of the Kubernetes secret (`opaque` or `tls`
+  type) that contains the TLS certificate to be used for secure connections to the object store.
+  If it is an opaque Kubernetes Secret, Rook will look in the secret provided at the `cert` key name. The value of the `cert` key must be
+  in the format expected by the [RGW
+  service](https://docs.ceph.com/docs/master/install/ceph-deploy/install-ceph-gateway/#using-ssl-with-civetweb):
+  "The server key, server certificate, and any other CA or intermediate certificates be supplied in
+  one file. Each of these items must be in PEM form." There are scenarios where the certificate DNS is set for a particular domain
+  that does not include the local Kubernetes DNS, namely the object store DNS service endpoint. If
+  adding the service DNS name to the certificate is not feasible, another key can be specified in the
+  secret's data: `insecureSkipVerify: true` to skip the certificate verification. It is not
+  recommended to enable this option since TLS is susceptible to machine-in-the-middle attacks unless
+  custom verification is used.
+* `caBundleRef`: If specified, this is the name of the Kubernetes secret (type `opaque`) that
+  contains an additional custom CA bundle to use. The secret must be in the same namespace as the Rook
+  cluster. Rook will look in the secret provided at the `cabundle` key name.
 * `port`: The port on which the Object service will be reachable. If host networking is enabled, the RGW daemons will also listen on that port. If running on SDN, the RGW daemon listening port will be 8080 internally.
 * `securePort`: The secure port on which RGW pods will be listening. A TLS certificate must be specified either via `sslCerticateRef` or `service.annotations`
 * `instances`: The number of pods that will be started to load balance this object store.
diff --git a/Documentation/ceph-object-store-user-crd.md b/Documentation/ceph-object-store-user-crd.md
index 34a77d45c7eb..27bbdbffaf3f 100644
--- a/Documentation/ceph-object-store-user-crd.md
+++ b/Documentation/ceph-object-store-user-crd.md
@@ -20,6 +20,13 @@ metadata:
 spec:
   store: my-store
   displayName: my-display-name
+  quotas:
+    maxBuckets: 100
+    maxSize: 10G
+    maxObjects: 10000
+  capabilities:
+    user: "*"
+    bucket: "*"
 ```
 
 ## Object Store User Settings
@@ -33,3 +40,16 @@ spec:
 * `store`: The object store in which the user will be created. This matches the name of the objectstore CRD.
 * `displayName`: The display name which will be passed to the `radosgw-admin user create` command.
+* `quotas`: Quota limits that can be set on the user (support added in Rook v1.7.3 and up).
+  Please refer [here](https://docs.ceph.com/en/latest/radosgw/admin/#quota-management) for details.
+  * `maxBuckets`: The maximum bucket limit for the user.
+  * `maxSize`: Maximum size limit of all objects across all the user's buckets.
+  * `maxObjects`: Maximum number of objects across all the user's buckets.
+* `capabilities`: Ceph allows users to be given additional permissions (support added in Rook v1.7.3 and up). Due to missing APIs in go-ceph for updating the user capabilities, this setting can currently only be used during the creation of the object store user. If a user's capabilities need to be modified, the user must be deleted and re-created.
+  See the [Ceph docs](https://docs.ceph.com/en/latest/radosgw/admin/#add-remove-admin-capabilities) for more info.
+  Rook supports adding `read`, `write`, `read, write`, or `*` permissions for the following resources:
+  * `users`
+  * `buckets`
+  * `usage`
+  * `metadata`
+  * `zone`
diff --git a/Documentation/ceph-object.md b/Documentation/ceph-object.md
index 6a9270425b0f..d03e18e7699d 100644
--- a/Documentation/ceph-object.md
+++ b/Documentation/ceph-object.md
@@ -10,7 +10,7 @@ Object storage exposes an S3 API to the storage cluster for applications to put
 
 ## Prerequisites
 
-This guide assumes a Rook cluster as explained in the [Quickstart](ceph-quickstart.md).
+This guide assumes a Rook cluster as explained in the [Quickstart](quickstart.md).
 
 ## Configure an Object Store
diff --git a/Documentation/ceph-osd-mgmt.md b/Documentation/ceph-osd-mgmt.md
index 3d88c6a84de9..d528dc4abe24 100644
--- a/Documentation/ceph-osd-mgmt.md
+++ b/Documentation/ceph-osd-mgmt.md
@@ -33,7 +33,7 @@ kubectl -n rook-ceph exec -it $(kubectl -n rook-ceph get pod -l "app=rook-ceph-t
 
 ## Add an OSD
 
-The [QuickStart Guide](ceph-quickstart.md) will provide the basic steps to create a cluster and start some OSDs. For more details on the OSD
+The [QuickStart Guide](quickstart.md) will provide the basic steps to create a cluster and start some OSDs. For more details on the OSD
 settings also see the [Cluster CRD](ceph-cluster-crd.md) documentation. If you are not seeing OSDs created, see the [Ceph Troubleshooting Guide](ceph-common-issues.md).
 
 To add more OSDs, Rook will automatically watch for new nodes and devices being added to your cluster.
@@ -70,10 +70,10 @@ If you are using `useAllDevices: true`, no change to the CR is necessary.
 removal steps in order to prevent Rook from detecting the old OSD and trying to re-create it before
 the disk is wiped or removed.**
 
-To stop the Rook Operator, run 
+To stop the Rook Operator, run
 `kubectl -n rook-ceph scale deployment rook-ceph-operator --replicas=0`.
 
-You must perform steps below to (1) purge the OSD and either (2.a) delete the underlying data or 
+You must perform steps below to (1) purge the OSD and either (2.a) delete the underlying data or
 (2.b)replace the disk before starting the Rook Operator again. Once you have done that, you can
 start the Rook operator again with
@@ -119,13 +119,15 @@ If you want to remove OSDs by hand, continue with the following sections. Howeve
 
 If the OSD purge job fails or you need fine-grained control of the removal, here are the individual commands that can be run from the toolbox.
 
-1. Mark the OSD as `out` if not already marked as such by Ceph. This signals Ceph to start moving (backfilling) the data that was on that OSD to another OSD.
+1. Detach the OSD PVC from Rook
+   - `kubectl -n rook-ceph label pvc <pvc-name> ceph.rook.io/DeviceSetPVCId-`
+2. Mark the OSD as `out` if not already marked as such by Ceph. This signals Ceph to start moving (backfilling) the data that was on that OSD to another OSD.
    - `ceph osd out osd.<ID>` (for example if the OSD ID is 23 this would be `ceph osd out osd.23`)
-2. Wait for the data to finish backfilling to other OSDs.
+3. Wait for the data to finish backfilling to other OSDs.
   - `ceph status` will indicate the backfilling is done when all of the PGs are `active+clean`. If desired, it's safe to remove the disk after that.
-3. Remove the OSD from the Ceph cluster
+4. Remove the OSD from the Ceph cluster
  - `ceph osd purge <ID> --yes-i-really-mean-it`
-4. Verify the OSD is removed from the node in the CRUSH map
+5.
Verify the OSD is removed from the node in the CRUSH map - `ceph osd tree` The operator can automatically remove OSD deployments that are considered "safe-to-destroy" by Ceph. diff --git a/Documentation/ceph-pool-crd.md b/Documentation/ceph-pool-crd.md index c5d28f49a956..0a6772357bd5 100644 --- a/Documentation/ceph-pool-crd.md +++ b/Documentation/ceph-pool-crd.md @@ -205,9 +205,11 @@ stretched) then you will have 2 replicas per datacenter where each replica ends * `mirroring`: Sets up mirroring of the pool * `enabled`: whether mirroring is enabled on that pool (default: false) * `mode`: mirroring mode to run, possible values are "pool" or "image" (required). Refer to the [mirroring modes Ceph documentation](https://docs.ceph.com/docs/master/rbd/rbd-mirroring/#enable-mirroring) for more details. - * `snapshotSchedules`: schedule(s) snapshot at the **pool** level. **Only** supported as of Ceph Octopus release. One or more schedules are supported. + * `snapshotSchedules`: schedule(s) snapshot at the **pool** level. **Only** supported as of Ceph Octopus (v15) release. One or more schedules are supported. * `interval`: frequency of the snapshots. The interval can be specified in days, hours, or minutes using d, h, m suffix respectively. * `startTime`: optional, determines at what time the snapshot process starts, specified using the ISO 8601 time format. + * `peers`: to configure mirroring peers. See the prerequisite [RBD Mirror documentation](ceph-rbd-mirror-crd.md) first. + * `secretNames`: a list of peers to connect to. Currently **only a single** peer is supported where a peer represents a Ceph cluster. * `statusCheck`: Sets up pool mirroring status * `mirror`: displays the mirroring status diff --git a/Documentation/ceph-quickstart.md b/Documentation/ceph-quickstart.md deleted file mode 100644 index 51fa217cbbc5..000000000000 --- a/Documentation/ceph-quickstart.md +++ /dev/null @@ -1,189 +0,0 @@ ---- -title: Ceph Storage -weight: 300 -indent: true ---- - -{% include_relative branch.liquid %} - - -# Ceph Storage Quickstart - -This guide will walk you through the basic setup of a Ceph cluster and enable you to consume block, object, and file storage -from other pods running in your cluster. - -## Minimum Version - -Kubernetes **v1.11** or higher is supported by Rook. - -**Important** If you are using K8s 1.15 or older, you will need to create a different version of the Rook CRDs. Create the `crds.yaml` found in the [pre-k8s-1.16](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/pre-k8s-1.16) subfolder of the example manifests. - -## Prerequisites - -To make sure you have a Kubernetes cluster that is ready for `Rook`, you can [follow these instructions](k8s-pre-reqs.md). - -In order to configure the Ceph storage cluster, at least one of these local storage options are required: -- Raw devices (no partitions or formatted filesystems) - - This requires `lvm2` to be installed on the host. - To avoid this dependency, you can create a single full-disk partition on the disk (see below) -- Raw partitions (no formatted filesystem) -- Persistent Volumes available from a storage class in `block` mode - -You can confirm whether your partitions or devices are formatted filesystems with the following command. 
- -```console -lsblk -f -``` ->``` ->NAME FSTYPE LABEL UUID MOUNTPOINT ->vda ->└─vda1 LVM2_member >eSO50t-GkUV-YKTH-WsGq-hNJY-eKNf-3i07IB -> ├─ubuntu--vg-root ext4 c2366f76-6e21-4f10-a8f3-6776212e2fe4 / -> └─ubuntu--vg-swap_1 swap 9492a3dc-ad75-47cd-9596-678e8cf17ff9 [SWAP] ->vdb ->``` - -If the `FSTYPE` field is not empty, there is a filesystem on top of the corresponding device. In this case, you can use vdb for Ceph and can't use vda and its partitions. - -## TL;DR - -If you're feeling lucky, a simple Rook cluster can be created with the following kubectl commands and [example yaml files](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph). For the more detailed install, skip to the next section to [deploy the Rook operator](#deploy-the-rook-operator). - -```console -$ git clone --single-branch --branch {{ branchName }} https://github.com/rook/rook.git -cd rook/cluster/examples/kubernetes/ceph -kubectl create -f crds.yaml -f common.yaml -f operator.yaml -kubectl create -f cluster.yaml -``` - -After the cluster is running, you can create [block, object, or file](#storage) storage to be consumed by other applications in your cluster. - -### Cluster Environments - -The Rook documentation is focused around starting Rook in a production environment. Examples are also -provided to relax some settings for test environments. When creating the cluster later in this guide, consider these example cluster manifests: -- [cluster.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/cluster.yaml): Cluster settings for a production cluster running on bare metal. Requires at least three worker nodes. -- [cluster-on-pvc.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/cluster-on-pvc.yaml): Cluster settings for a production cluster running in a dynamic cloud environment. -- [cluster-test.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/cluster-test.yaml): Cluster settings for a test environment such as minikube. - -See the [Ceph examples](ceph-examples.md) for more details. - -## Deploy the Rook Operator - -The first step is to deploy the Rook operator. Check that you are using the [example yaml files](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph) that correspond to your release of Rook. For more options, see the [examples documentation](ceph-examples.md). - -```console -cd cluster/examples/kubernetes/ceph -kubectl create -f crds.yaml -f common.yaml -f operator.yaml - -# verify the rook-ceph-operator is in the `Running` state before proceeding -kubectl -n rook-ceph get pod -``` - -You can also deploy the operator with the [Rook Helm Chart](helm-operator.md). - -Before you start the operator in production, there are some settings that you may want to consider: -1. If you are using kubernetes v1.15 or older you need to create CRDs found here `/cluster/examples/kubernetes/ceph/pre-k8s-1.16/crd.yaml`. - The apiextension v1beta1 version of CustomResourceDefinition was deprecated in Kubernetes v1.16. -2. Consider if you want to enable certain Rook features that are disabled by default. See the [operator.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/operator.yaml) for these and other advanced settings. - 1. Device discovery: Rook will watch for new devices to configure if the `ROOK_ENABLE_DISCOVERY_DAEMON` setting is enabled, commonly used in bare metal clusters. - 2. 
Flex driver: The flex driver is deprecated in favor of the CSI driver, but can still be enabled with the `ROOK_ENABLE_FLEX_DRIVER` setting. - 3. Node affinity and tolerations: The CSI driver by default will run on any node in the cluster. To configure the CSI driver affinity, several settings are available. - -If you wish to deploy into a namespace other than the default `rook-ceph`, see the -[Ceph advanced configuration section](ceph-advanced-configuration.md#using-alternate-namespaces) on the topic. - -## Create a Rook Ceph Cluster - -Now that the Rook operator is running we can create the Ceph cluster. For the cluster to survive reboots, -make sure you set the `dataDirHostPath` property that is valid for your hosts. For more settings, see the documentation on [configuring the cluster](ceph-cluster-crd.md). - -Create the cluster: - -```console -kubectl create -f cluster.yaml -``` - -Use `kubectl` to list pods in the `rook-ceph` namespace. You should be able to see the following pods once they are all running. -The number of osd pods will depend on the number of nodes in the cluster and the number of devices configured. -If you did not modify the `cluster.yaml` above, it is expected that one OSD will be created per node. -The CSI, `rook-ceph-agent` (flex driver), and `rook-discover` pods are also optional depending on your settings. - -> If the `rook-ceph-mon`, `rook-ceph-mgr`, or `rook-ceph-osd` pods are not created, please refer to the -> [Ceph common issues](ceph-common-issues.md) for more details and potential solutions. - -```console -kubectl -n rook-ceph get pod -``` - ->``` ->NAME READY STATUS RESTARTS AGE ->csi-cephfsplugin-provisioner-d77bb49c6-n5tgs 5/5 Running 0 140s ->csi-cephfsplugin-provisioner-d77bb49c6-v9rvn 5/5 Running 0 140s ->csi-cephfsplugin-rthrp 3/3 Running 0 140s ->csi-rbdplugin-hbsm7 3/3 Running 0 140s ->csi-rbdplugin-provisioner-5b5cd64fd-nvk6c 6/6 Running 0 140s ->csi-rbdplugin-provisioner-5b5cd64fd-q7bxl 6/6 Running 0 140s ->rook-ceph-crashcollector-minikube-5b57b7c5d4-hfldl 1/1 Running 0 105s ->rook-ceph-mgr-a-64cd7cdf54-j8b5p 1/1 Running 0 77s ->rook-ceph-mon-a-694bb7987d-fp9w7 1/1 Running 0 105s ->rook-ceph-mon-b-856fdd5cb9-5h2qk 1/1 Running 0 94s ->rook-ceph-mon-c-57545897fc-j576h 1/1 Running 0 85s ->rook-ceph-operator-85f5b946bd-s8grz 1/1 Running 0 92m ->rook-ceph-osd-0-6bb747b6c5-lnvb6 1/1 Running 0 23s ->rook-ceph-osd-1-7f67f9646d-44p7v 1/1 Running 0 24s ->rook-ceph-osd-2-6cd4b776ff-v4d68 1/1 Running 0 25s ->rook-ceph-osd-prepare-node1-vx2rz 0/2 Completed 0 60s ->rook-ceph-osd-prepare-node2-ab3fd 0/2 Completed 0 60s ->rook-ceph-osd-prepare-node3-w4xyz 0/2 Completed 0 60s ->``` - -To verify that the cluster is in a healthy state, connect to the [Rook toolbox](ceph-toolbox.md) and run the -`ceph status` command. - -* All mons should be in quorum -* A mgr should be active -* At least one OSD should be active -* If the health is not `HEALTH_OK`, the warnings or errors should be investigated - -```console -ceph status -``` ->``` -> cluster: -> id: a0452c76-30d9-4c1a-a948-5d8405f19a7c -> health: HEALTH_OK -> -> services: -> mon: 3 daemons, quorum a,b,c (age 3m) -> mgr: a(active, since 2m) -> osd: 3 osds: 3 up (since 1m), 3 in (since 1m) ->... ->``` - -If the cluster is not healthy, please refer to the [Ceph common issues](ceph-common-issues.md) for more details and potential solutions. 
- -## Storage - -For a walkthrough of the three types of storage exposed by Rook, see the guides for: - -* **[Block](ceph-block.md)**: Create block storage to be consumed by a pod -* **[Object](ceph-object.md)**: Create an object store that is accessible inside or outside the Kubernetes cluster -* **[Shared Filesystem](ceph-filesystem.md)**: Create a filesystem to be shared across multiple pods - -## Ceph Dashboard - -Ceph has a dashboard in which you can view the status of your cluster. Please see the [dashboard guide](ceph-dashboard.md) for more details. - -## Tools - -We have created a toolbox container that contains the full suite of Ceph clients for debugging and troubleshooting your Rook cluster. Please see the [toolbox readme](ceph-toolbox.md) for setup and usage information. Also see our [advanced configuration](ceph-advanced-configuration.md) document for helpful maintenance and tuning examples. - -## Monitoring - -Each Rook cluster has some built in metrics collectors/exporters for monitoring with [Prometheus](https://prometheus.io/). -To learn how to set up monitoring for your Rook cluster, you can follow the steps in the [monitoring guide](./ceph-monitoring.md). - -## Teardown - -When you are done with the test cluster, see [these instructions](ceph-teardown.md) to clean up the cluster. diff --git a/Documentation/ceph-rbd-mirror-crd.md b/Documentation/ceph-rbd-mirror-crd.md index 2c8b022eac95..e768ecf1cde1 100644 --- a/Documentation/ceph-rbd-mirror-crd.md +++ b/Documentation/ceph-rbd-mirror-crd.md @@ -27,7 +27,7 @@ spec: ### Prerequisites -This guide assumes you have created a Rook cluster as explained in the main [Quickstart guide](ceph-quickstart.md) +This guide assumes you have created a Rook cluster as explained in the main [Quickstart guide](quickstart.md) ## Settings @@ -41,9 +41,6 @@ If any setting is unspecified, a suitable default will be used automatically. ### RBDMirror Settings * `count`: The number of rbd mirror instance to run. -* `peers`: to configure mirroring peers - * `secretNames`: a list of peers to connect to. Currently (Ceph Octopus release) **only a single** peer is supported where a peer represents a Ceph cluster. - However, if you want to enable mirroring of multiple pools, you would have to have **one Secret per pool**, but the token (the peer identity) must be the same. * `placement`: The rbd mirror pods can be given standard Kubernetes placement restrictions with `nodeAffinity`, `tolerations`, `podAffinity`, and `podAntiAffinity` similar to placement defined for daemons configured by the [cluster CRD](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/cluster.yaml). * `annotations`: Key value pair list of annotations to add. * `labels`: Key value pair list of labels to add. @@ -52,53 +49,5 @@ If any setting is unspecified, a suitable default will be used automatically. ### Configuring mirroring peers -On an external site you want to mirror with, you need to create a bootstrap peer token. -The token will be used by one site to **pull** images from the other site. -The following assumes the name of the pool is "test" and the site name "europe" (just like the region), so we will be pulling images from this site: - -```console -external-cluster-console # rbd mirror pool peer bootstrap create test --site-name europe -``` - -For more details, refer to the official rbd mirror documentation on [how to create a bootstrap peer](https://docs.ceph.com/docs/master/rbd/rbd-mirroring/#bootstrap-peers). 
- -When the peer token is available, you need to create a Kubernetes Secret. -Our `europe-cluster-peer-pool-test-1` will have to be created manually, like so: - -```console -$ kubectl -n rook-ceph create secret generic "europe-cluster-peer-pool-test-1" \ ---from-literal=token=eyJmc2lkIjoiYzZiMDg3ZjItNzgyOS00ZGJiLWJjZmMtNTNkYzM0ZTBiMzVkIiwiY2xpZW50X2lkIjoicmJkLW1pcnJvci1wZWVyIiwia2V5IjoiQVFBV1lsWmZVQ1Q2RGhBQVBtVnAwbGtubDA5YVZWS3lyRVV1NEE9PSIsIm1vbl9ob3N0IjoiW3YyOjE5Mi4xNjguMTExLjEwOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTA6Njc4OV0sW3YyOjE5Mi4xNjguMTExLjEyOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTI6Njc4OV0sW3YyOjE5Mi4xNjguMTExLjExOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTE6Njc4OV0ifQ== \ ---from-literal=pool=test -``` - -Rook will read both `token` and `pool` keys of the Data content of the Secret. -Rook also accepts the `destination` key, which specifies the mirroring direction. -It defaults to rx-tx for bidirectional mirroring, but can also be set to rx-only for unidirectional mirroring. - -You can now inject the rbdmirror CR: - -```yaml -apiVersion: ceph.rook.io/v1 -kind: CephRBDMirror -metadata: - name: my-rbd-mirror - namespace: rook-ceph -spec: - count: 1 - peers: - secretNames: - - "europe-cluster-peer-pool-test-1" -``` - -You can add more pools, for this just repeat the above and change the "pool" value of the Kubernetes Secret. -So the list might eventually look like: - -```yaml - peers: - secretNames: - - "europe-cluster-peer-pool-test-1" - - "europe-cluster-peer-pool-test-2" - - "europe-cluster-peer-pool-test-3" -``` - -Along with three Kubernetes Secret. +Configure mirroring peers individually for each CephBlockPool. Refer to the +[CephBlockPool documentation](ceph-pool-crd.md#mirroring) for more detail. diff --git a/Documentation/ceph-storage.md b/Documentation/ceph-storage.md index 28dc963f108d..19e632f836a6 100644 --- a/Documentation/ceph-storage.md +++ b/Documentation/ceph-storage.md @@ -9,28 +9,26 @@ Ceph is a highly scalable distributed storage solution for **block storage**, ** ## Design -Rook enables Ceph storage systems to run on Kubernetes using Kubernetes primitives. The following image illustrates how Ceph Rook integrates with Kubernetes: - -![Rook Architecture on Kubernetes](media/rook-architecture.png) +Rook enables Ceph storage to run on Kubernetes using Kubernetes primitives. With Ceph running in the Kubernetes cluster, Kubernetes applications can mount block devices and filesystems managed by Rook, or can use the S3/Swift API for object storage. The Rook operator automates configuration of storage components and monitors the cluster to ensure the storage remains available and healthy. The Rook operator is a simple container that has all that is needed to bootstrap -and monitor the storage cluster. The operator will start and monitor [Ceph monitor pods](ceph-mon-health.md), the Ceph OSD daemons to provide RADOS storage, as well as start and manage other Ceph daemons. The operator manages CRDs for pools, object stores (S3/Swift), and filesystems by initializing the pods and other artifacts necessary to run the services. +and monitor the storage cluster. The operator will start and monitor [Ceph monitor pods](ceph-mon-health.md), the Ceph OSD daemons to provide RADOS storage, as well as start and manage other Ceph daemons. The operator manages CRDs for pools, object stores (S3/Swift), and filesystems by initializing the pods and other resources necessary to run the services. The operator will monitor the storage daemons to ensure the cluster is healthy. 
Ceph mons will be started or failed over when necessary, and other adjustments are made as the cluster grows or shrinks. The operator will also watch for desired state changes -requested by the api service and apply the changes. +specified in the Ceph custom resources (CRs) and apply the changes. -The Rook operator also initializes the agents that are needed for consuming the storage. Rook automatically configures the Ceph-CSI driver to mount the storage to your pods. Rook's flex driver is also available, though it is not enabled by default and will soon be deprecated in favor of the CSI driver. +Rook automatically configures the Ceph-CSI driver to mount the storage to your pods. ![Rook Components on Kubernetes](media/kubernetes.png) -The `rook/ceph` image includes all necessary tools to manage the cluster -- there is no change to the data path. -Rook does not attempt to maintain full fidelity with Ceph. Many of the Ceph concepts like placement groups and crush maps -are hidden so you don't have to worry about them. Instead Rook creates a much simplified user experience for admins that is in terms +The `rook/ceph` image includes all necessary tools to manage the cluster. Rook is not in the Ceph data path. +Many of the Ceph concepts like placement groups and crush maps +are hidden so you don't have to worry about them. Instead Rook creates a simplified user experience for admins that is in terms of physical resources, pools, volumes, filesystems, and buckets. At the same time, advanced configuration can be applied when needed with the Ceph tools. Rook is implemented in golang. Ceph is implemented in C++ where the data path is highly optimized. We believe diff --git a/Documentation/ceph-teardown.md b/Documentation/ceph-teardown.md index db3f8dc0a56c..58e0ac02f4a7 100644 --- a/Documentation/ceph-teardown.md +++ b/Documentation/ceph-teardown.md @@ -153,7 +153,7 @@ If for some reason the operator is not able to remove the finalizer (ie. the ope ```console for CRD in $(kubectl get crd -n rook-ceph | awk '/ceph.rook.io/ {print $1}'); do kubectl get -n rook-ceph "$CRD" -o name | \ - xargs -I {} kubectl patch {} --type merge -p '{"metadata":{"finalizers": [null]}}' + xargs -I {} kubectl patch -n rook-ceph {} --type merge -p '{"metadata":{"finalizers": [null]}}' done ``` diff --git a/Documentation/ceph-toolbox.md b/Documentation/ceph-toolbox.md index a6baed263543..3ae509c3dbc3 100644 --- a/Documentation/ceph-toolbox.md +++ b/Documentation/ceph-toolbox.md @@ -13,7 +13,7 @@ The toolbox can be run in two modes: 1. [Interactive](#interactive-toolbox): Start a toolbox pod where you can connect and execute Ceph commands from a shell 2. [One-time job](#toolbox-job): Run a script with Ceph commands and collect the results from the job log -> Prerequisite: Before running the toolbox you should have a running Rook cluster deployed (see the [Quickstart Guide](ceph-quickstart.md)). +> Prerequisite: Before running the toolbox you should have a running Rook cluster deployed (see the [Quickstart Guide](quickstart.md)). 
## Interactive Toolbox @@ -43,7 +43,7 @@ spec: dnsPolicy: ClusterFirstWithHostNet containers: - name: rook-ceph-tools - image: rook/ceph:master + image: rook/ceph:v1.7.8 command: ["/tini"] args: ["-g", "--", "/usr/local/bin/toolbox.sh"] imagePullPolicy: IfNotPresent @@ -133,7 +133,7 @@ spec: spec: initContainers: - name: config-init - image: rook/ceph:master + image: rook/ceph:v1.7.8 command: ["/usr/local/bin/toolbox.sh"] args: ["--skip-watch"] imagePullPolicy: IfNotPresent @@ -155,7 +155,7 @@ spec: mountPath: /etc/rook containers: - name: script - image: rook/ceph:master + image: rook/ceph:v1.7.8 volumeMounts: - mountPath: /etc/ceph name: ceph-config diff --git a/Documentation/ceph-upgrade.md b/Documentation/ceph-upgrade.md index d48448562eed..232f2772bbe5 100644 --- a/Documentation/ceph-upgrade.md +++ b/Documentation/ceph-upgrade.md @@ -18,7 +18,7 @@ We welcome feedback and opening issues! ## Supported Versions -This guide is for upgrading from **Rook v1.5.x to Rook v1.6.x**. +This guide is for upgrading from **Rook v1.6.x to Rook v1.7.x**. Please refer to the upgrade guides from previous releases for supported upgrade paths. Rook upgrades are only supported between official releases. Upgrades to and from `master` are not @@ -27,6 +27,7 @@ supported. For a guide to upgrade previous versions of Rook, please refer to the version of documentation for those releases. +* [Upgrade 1.5 to 1.6](https://rook.io/docs/rook/v1.6/ceph-upgrade.html) * [Upgrade 1.4 to 1.5](https://rook.io/docs/rook/v1.5/ceph-upgrade.html) * [Upgrade 1.3 to 1.4](https://rook.io/docs/rook/v1.4/ceph-upgrade.html) * [Upgrade 1.2 to 1.3](https://rook.io/docs/rook/v1.3/ceph-upgrade.html) @@ -52,29 +53,38 @@ With this upgrade guide, there are a few notes to consider: Unless otherwise noted due to extenuating requirements, upgrades from one patch release of Rook to another are as simple as updating the common resources and the image of the Rook operator. For -example, when Rook v1.6.1 is released, the process of updating from v1.6.0 is as simple as running +example, when Rook v1.7.8 is released, the process of updating from v1.7.0 is as simple as running the following: -First get the latest common resources manifests that contain the latest changes for Rook v1.6. +First get the latest common resources manifests that contain the latest changes for Rook v1.7. ```sh -git clone --single-branch --depth=1 --branch v1.6.1 https://github.com/rook/rook.git +git clone --single-branch --depth=1 --branch v1.7.8 https://github.com/rook/rook.git cd rook/cluster/examples/kubernetes/ceph ``` +**IMPORTANT** If you have RBD or CephFS volumes and are upgrading from Rook v1.6.0 - v1.6.4, +there is an issue upgrading from those versions that causes the volumes to hang. +Nodes will need to be restarted for the volumes to connect again. See +[this issue](https://github.com/rook/rook/issues/8085#issuecomment-859234755) for more details. +Future upgrades of Rook will not have this issue. + If you have deployed the Rook Operator or the Ceph cluster into a different namespace than `rook-ceph`, see the [Update common resources and CRDs](#1-update-common-resources-and-crds) section for instructions on how to change the default namespaces in `common.yaml`. -Then apply the latest changes from v1.6 and update the Rook Operator image. +Then apply the latest changes from v1.7 and update the Rook Operator image. 
```console kubectl apply -f common.yaml -f crds.yaml -kubectl -n rook-ceph set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.6.1 +kubectl -n rook-ceph set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.7.8 ``` As exemplified above, it is a good practice to update Rook-Ceph common resources from the example manifests before any update. The common resources and CRDs might not be updated with every release, but K8s will only apply updates to the ones that changed. +Also update optional resources like Prometheus monitoring noted more fully in the +[upgrade section below](#updates-for-optional-resources). + ## Helm * The minimum supported Helm version is **v3.2.0** @@ -86,7 +96,7 @@ Helm will **not** update the Ceph version. See [Ceph Version Upgrades](#ceph-ver instructions on updating the Ceph version. -## Upgrading from v1.5 to v1.6 +## Upgrading from v1.6 to v1.7 **Rook releases from master are expressly unsupported.** It is strongly recommended that you use [official releases](https://github.com/rook/rook/releases) of Rook. Unreleased versions from the @@ -94,7 +104,7 @@ master branch are subject to changes and incompatibilities that will not be supp official releases. Builds from the master branch can have functionality changed or removed at any time without compatibility support and without prior notice. -### Prerequisites +### **Prerequisites** We will do all our work in the Ceph example manifests directory. @@ -127,6 +137,12 @@ In order to successfully upgrade a Rook cluster, the following prerequisites mus * All pods consuming Rook storage should be created, running, and in a steady state. No Rook persistent volumes should be in the act of being created or deleted. +**IMPORTANT** If you have RBD or CephFS volumes and are upgrading from Rook v1.6.0 - v1.6.4, +there is an issue upgrading from those versions that causes the volumes to hang. +Nodes will need to be restarted for the volumes to connect again. See +[this issue](https://github.com/rook/rook/issues/8085#issuecomment-859234755) for more details. +Future upgrades of Rook will not have this issue. + ## Health Verification Before we begin the upgrade process, let's first review some ways that you can verify the health of @@ -140,7 +156,7 @@ See the common issues pages for troubleshooting and correcting health issues: * [General troubleshooting](./common-issues.md) * [Ceph troubleshooting](./ceph-common-issues.md) -### Pods all Running +### **Pods all Running** In a healthy Rook cluster, the operator, the agents and all Rook namespace pods should be in the `Running` state and have few, if any, pod restarts. To verify this, run the following commands: @@ -149,7 +165,7 @@ In a healthy Rook cluster, the operator, the agents and all Rook namespace pods kubectl -n $ROOK_CLUSTER_NAMESPACE get pods ``` -### Status Output +### **Status Output** The Rook toolbox contains the Ceph tools that can give you status details of the cluster with the `ceph status` command. Let's look at an output sample and review some of the details: @@ -201,9 +217,9 @@ details on the health of the system, such as `ceph osd status`. See the Rook will prevent the upgrade of the Ceph daemons if the health is in a `HEALTH_ERR` state. If you desired to proceed with the upgrade anyway, you will need to set either `skipUpgradeChecks: true` or `continueUpgradeAfterChecksEvenIfNotHealthy: true` -as described in the [cluster CR settings](https://rook.github.io/docs/rook/v1.6/ceph-cluster-crd.html#cluster-settings). 
+as described in the [cluster CR settings](https://rook.github.io/docs/rook/v1.7/ceph-cluster-crd.html#cluster-settings). -### Container Versions +### **Container Versions** The container version running in a specific pod in the Rook cluster can be verified in its pod spec output. For example for the monitor pod `mon-b`, we can verify the container version it is running @@ -234,7 +250,7 @@ kubectl -n $ROOK_CLUSTER_NAMESPACE get deployments -o jsonpath='{range .items[*] kubectl -n $ROOK_CLUSTER_NAMESPACE get jobs -o jsonpath='{range .items[*]}{.metadata.name}{" \tsucceeded: "}{.status.succeeded}{" \trook-version="}{.metadata.labels.rook-version}{"\n"}{end}' ``` -### Rook Volume Health +### **Rook Volume Health** Any pod that is using a Rook volume should also remain healthy: @@ -244,9 +260,9 @@ Any pod that is using a Rook volume should also remain healthy: ## Rook Operator Upgrade Process -In the examples given in this guide, we will be upgrading a live Rook cluster running `v1.5.9` to -the version `v1.6.0`. This upgrade should work from any official patch release of Rook v1.5 to any -official patch release of v1.6. +In the examples given in this guide, we will be upgrading a live Rook cluster running `v1.6.8` to +the version `v1.7.8`. This upgrade should work from any official patch release of Rook v1.6 to any +official patch release of v1.7. **Rook release from `master` are expressly unsupported.** It is strongly recommended that you use [official releases](https://github.com/rook/rook/releases) of Rook. Unreleased versions from the @@ -261,11 +277,7 @@ if applicable. Let's get started! -> **IMPORTANT** If your CephCluster has specified `driveGroups` in the spec, you must follow the -> instructions to [migrate the Drive Group spec](#migrate-the-drive-group-spec) before performing -> any of the upgrade steps below. - -## 1. Update common resources and CRDs +### **1. Update common resources and CRDs** > Automatically updated if you are upgrading via the helm chart @@ -277,9 +289,9 @@ needed by the Operator. Also update the Custom Resource Definitions (CRDs). > `rbac.authorization.k8s.io/v1beta1` instead of `rbac.authorization.k8s.io/v1` > You will also need to apply `pre-k8s-1.16/crds.yaml` instead of `crds.yaml`. -First get the latest common resources manifests that contain the latest changes for Rook v1.6. +First get the latest common resources manifests that contain the latest changes. ```sh -git clone --single-branch --depth=1 --branch v1.6.0 https://github.com/rook/rook.git +git clone --single-branch --depth=1 --branch v1.7.8 https://github.com/rook/rook.git cd rook/cluster/examples/kubernetes/ceph ``` @@ -293,24 +305,21 @@ sed -i.bak \ common.yaml ``` -Then apply the latest changes from v1.6. +Then apply the latest changes. ```sh kubectl apply -f common.yaml -f crds.yaml ``` -> **NOTE:** If your Rook-Ceph cluster was initially installed with rook v1.4 or lower, the above -> command will return errors due to updates from Kubernetes' v1beta1 Custom Resource Definitions. -> The error will contain text similar to `... spec.preserveUnknownFields: Invalid value...`. +#### **Updates for optional resources** + +If you have [Prometheus monitoring](ceph-monitoring.md) enabled, follow the +step to upgrade the Prometheus RBAC resources as well. -If you experience this error applying the latest changes to CRDs, use `kubectl`'s `replace` command -to replace the resources followed by `apply` to verify that the resources are updated without other -errors. 
```sh -kubectl replace -f crds.yaml -kubectl apply -f crds.yaml +kubectl apply -f cluster/examples/kubernetes/ceph/monitoring/rbac.yaml ``` -## 2. Update Ceph CSI versions +### **2. Update Ceph CSI versions** > Automatically updated if you are upgrading via the helm chart @@ -320,18 +329,18 @@ details. > Note: If using snapshots, refer to the [Upgrade Snapshot API guide](ceph-csi-snapshot.md#upgrade-snapshot-api). -## 3. Update the Rook Operator +### **3. Update the Rook Operator** > Automatically updated if you are upgrading via the helm chart -The largest portion of the upgrade is triggered when the operator's image is updated to `v1.6.x`. +The largest portion of the upgrade is triggered when the operator's image is updated to `v1.7.x`. When the operator is updated, it will proceed to update all of the Ceph daemons. ```sh -kubectl -n $ROOK_OPERATOR_NAMESPACE set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.6.0 +kubectl -n $ROOK_OPERATOR_NAMESPACE set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.7.8 ``` -## 4. Wait for the upgrade to complete +### **4. Wait for the upgrade to complete** Watch now in amazement as the Ceph mons, mgrs, OSDs, rbd-mirrors, MDSes and RGWs are terminated and replaced with updated versions in sequence. The cluster may be offline very briefly as mons update, @@ -343,20 +352,19 @@ The versions of the components can be viewed as they are updated: watch --exec kubectl -n $ROOK_CLUSTER_NAMESPACE get deployments -l rook_cluster=$ROOK_CLUSTER_NAMESPACE -o jsonpath='{range .items[*]}{.metadata.name}{" \treq/upd/avl: "}{.spec.replicas}{"/"}{.status.updatedReplicas}{"/"}{.status.readyReplicas}{" \trook-version="}{.metadata.labels.rook-version}{"\n"}{end}' ``` -As an example, this cluster is midway through updating the OSDs from v1.5 to v1.6. When all -deployments report `1/1/1` availability and `rook-version=v1.6.0`, the Ceph cluster's core -components are fully updated. +As an example, this cluster is midway through updating the OSDs. When all deployments report `1/1/1` +availability and `rook-version=v1.7.8`, the Ceph cluster's core components are fully updated. >``` >Every 2.0s: kubectl -n rook-ceph get deployment -o j... 
> ->rook-ceph-mgr-a req/upd/avl: 1/1/1 rook-version=v1.6.0 ->rook-ceph-mon-a req/upd/avl: 1/1/1 rook-version=v1.6.0 ->rook-ceph-mon-b req/upd/avl: 1/1/1 rook-version=v1.6.0 ->rook-ceph-mon-c req/upd/avl: 1/1/1 rook-version=v1.6.0 ->rook-ceph-osd-0 req/upd/avl: 1// rook-version=v1.6.0 ->rook-ceph-osd-1 req/upd/avl: 1/1/1 rook-version=v1.5.9 ->rook-ceph-osd-2 req/upd/avl: 1/1/1 rook-version=v1.5.9 +>rook-ceph-mgr-a req/upd/avl: 1/1/1 rook-version=v1.7.8 +>rook-ceph-mon-a req/upd/avl: 1/1/1 rook-version=v1.7.8 +>rook-ceph-mon-b req/upd/avl: 1/1/1 rook-version=v1.7.8 +>rook-ceph-mon-c req/upd/avl: 1/1/1 rook-version=v1.7.8 +>rook-ceph-osd-0 req/upd/avl: 1// rook-version=v1.7.8 +>rook-ceph-osd-1 req/upd/avl: 1/1/1 rook-version=v1.6.8 +>rook-ceph-osd-2 req/upd/avl: 1/1/1 rook-version=v1.6.8 >``` An easy check to see if the upgrade is totally finished is to check that there is only one @@ -365,25 +373,57 @@ An easy check to see if the upgrade is totally finished is to check that there i ```console # kubectl -n $ROOK_CLUSTER_NAMESPACE get deployment -l rook_cluster=$ROOK_CLUSTER_NAMESPACE -o jsonpath='{range .items[*]}{"rook-version="}{.metadata.labels.rook-version}{"\n"}{end}' | sort | uniq This cluster is not yet finished: - rook-version=v1.5.9 - rook-version=v1.6.0 + rook-version=v1.6.8 + rook-version=v1.7.8 This cluster is finished: - rook-version=v1.6.0 + rook-version=v1.7.8 ``` -## 5. Verify the updated cluster +### **5. Verify the updated cluster** -At this point, your Rook operator should be running version `rook/ceph:v1.6.0`. +At this point, your Rook operator should be running version `rook/ceph:v1.7.8`. Verify the Ceph cluster's health using the [health verification section](#health-verification). +### **6. Update CephRBDMirror and CephBlockPool configs** + +If you are not using a `CephRBDMirror` in your Rook cluster, you may disregard this section. + +Otherwise, please note that the location of the `CephRBDMirror` `spec.peers` config has moved to +`CephBlockPool` `spec.mirroring.peers` in Rook v1.7. This change allows each pool to have its own +peer and enables pools to re-use an existing peer secret if it points to the same cluster peer. + +You may wish to see the [CephBlockPool spec Documentation](ceph-pool-crd.md#spec) for the latest +configuration advice. + +The pre-existing config location in `CephRBDMirror` `spec.peers` will continue to be supported, but +users are still encouraged to migrate this setting from `CephRBDMirror` to relevant `CephBlockPool` +resources. + +To migrate the setting, follow these steps: +1. Stop the Rook-Ceph operator by downscaling the Deployment to zero replicas. + ```sh + kubectl -n $ROOK_OPERATOR_NAMESPACE scale deployment rook-ceph-operator --replicas=0 + ``` +2. Copy the `spec.peers` config from `CephRBDMirror` to every `CephBlockPool` in your cluster that + has mirroring enabled. +3. Remove the `peers` spec from the `CephRBDMirror` resource. +4. Resume the Rook-Ceph operator by scaling the Deployment back to one replica. + ```sh + kubectl -n $ROOK_OPERATOR_NAMESPACE scale deployment rook-ceph-operator --replicas=1 + ``` + ## Ceph Version Upgrades -Rook v1.6 now supports Ceph Pacific 16.2.0 or newer. Support remains for Ceph Nautilus 14.2.5 or -newer and Ceph Octopus v15.2.0 or newer. These are the only supported major versions of Ceph. Rook -v1.7 will no longer support Ceph Nautilus (14.2.x), and users will have to upgrade Ceph to -Octopus (15.2.x) or Pacific (16.2.x) before the next upgrade. 
+Rook v1.7 supports the following Ceph versions:
+ - Ceph Pacific 16.2.0 or newer
+ - Ceph Octopus v15.2.0 or newer
+ - Ceph Nautilus 14.2.5 or newer
+
+These are the only supported versions of Ceph. Rook v1.8 will no longer support Ceph Nautilus
+(14.2.x), and users will have to upgrade Ceph to Octopus (15.2.x) or Pacific (16.2.x) before
+upgrading to Rook v1.8.
 
 > **IMPORTANT: When an update is requested, the operator will check Ceph's status, if it is in `HEALTH_ERR` it will refuse to do the upgrade.**
 
@@ -395,12 +435,57 @@ updated we wait for things to settle (monitors to be in a quorum, PGs to be clea
 MDSes, etc.), then only when the condition is met we move to the next daemon. We repeat this
 process until all the daemons have been updated.
 
-### Ceph images
+### **Disable `bluestore_fsck_quick_fix_on_mount`**
+> **WARNING: There is a notice from Ceph for users upgrading to Ceph Pacific v16.2.6 or lower from
+> an earlier major version of Ceph. If you are upgrading to Ceph Pacific (v16), please upgrade to
+> v16.2.7 or higher if possible.**
+
+If you must upgrade to a version lower than v16.2.7, ensure that all instances of
+`bluestore_fsck_quick_fix_on_mount` in Rook-Ceph configs are removed.
+
+First, ensure no references to `bluestore_fsck_quick_fix_on_mount` are present in the
+`rook-config-override` [ConfigMap](ceph-advanced-configuration.md#custom-cephconf-settings). Remove
+them if they exist.
+
+Finally, ensure no references to `bluestore_fsck_quick_fix_on_mount` are present in Ceph's internal
+configuration. Run all commands below from the [toolbox](ceph-toolbox.md).
+
+In the example below, two instances of `bluestore_fsck_quick_fix_on_mount` are present (marked with
+comments), and some output text has been removed for brevity.
+```sh
+ceph config-key dump
+```
+```
+{
+    "config/global/bluestore_fsck_quick_fix_on_mount": "false",   # <-- FALSE
+    "config/global/osd_scrub_auto_repair": "true",
+    "config/mgr.a/mgr/dashboard/server_port": "7000",
+    "config/mgr/mgr/balancer/active": "true",
+    "config/osd/bluestore_fsck_quick_fix_on_mount": "true",   # <-- TRUE
+}
+```
+
+Remove the configs for both with the commands below. Note how the `config/...` paths correspond to
+the output above.
+```sh
+ceph config-key rm config/global/bluestore_fsck_quick_fix_on_mount
+ceph config-key rm config/osd/bluestore_fsck_quick_fix_on_mount
+```
+
+Run `ceph config-key dump` again afterwards to verify that all references to
+`bluestore_fsck_quick_fix_on_mount` are gone.
+
+For more information, see https://github.com/rook/rook/issues/9185.
+
+### **Ceph images**
+
+Official Ceph container images can be found on [Quay](https://quay.io/repository/ceph/ceph?tab=tags).
+Prior to August 2021, official images were on docker.io. While those images will remain on Docker
+Hub, all new images are being pushed to Quay.
 
-Official Ceph container images can be found on [Docker Hub](https://hub.docker.com/r/ceph/ceph/tags/).
 These images are tagged in a few ways:
 
-* The most explicit form of tags are full-ceph-version-and-build tags (e.g., `v16.2.5-20210708`).
+* The most explicit form of tags are full-ceph-version-and-build tags (e.g., `v16.2.6-20210918`).
   These tags are recommended for production clusters, as there is no possibility for the cluster to
   be heterogeneous with respect to the version of Ceph running in containers.
* Ceph major version tags (e.g., `v16`) are useful for development and test clusters so that the @@ -408,20 +493,20 @@ These images are tagged in a few ways: **Ceph containers other than the official images from the registry above will not be supported.** -### Example upgrade to Ceph Octopus +### **Example upgrade to Ceph Pacific** -#### 1. Update the main Ceph daemons +#### **1. Update the main Ceph daemons** The majority of the upgrade will be handled by the Rook operator. Begin the upgrade by changing the Ceph image field in the cluster CRD (`spec.cephVersion.image`). ```sh -NEW_CEPH_IMAGE='quay.io/ceph/ceph:v16.2.5-20210708' +NEW_CEPH_IMAGE='quay.io/ceph/ceph:v16.2.6-20210918' CLUSTER_NAME="$ROOK_CLUSTER_NAMESPACE" # change if your cluster name is not the Rook namespace kubectl -n $ROOK_CLUSTER_NAMESPACE patch CephCluster $CLUSTER_NAME --type=merge -p "{\"spec\": {\"cephVersion\": {\"image\": \"$NEW_CEPH_IMAGE\"}}}" ``` -#### 2. Wait for the daemon pod updates to complete +#### **2. Wait for the daemon pod updates to complete** As with upgrading Rook, you must now wait for the upgrade to complete. Status can be determined in a similar way to the Rook upgrade as well. @@ -435,16 +520,17 @@ Determining when the Ceph has fully updated is rather simple. ```console kubectl -n $ROOK_CLUSTER_NAMESPACE get deployment -l rook_cluster=$ROOK_CLUSTER_NAMESPACE -o jsonpath='{range .items[*]}{"ceph-version="}{.metadata.labels.ceph-version}{"\n"}{end}' | sort | uniq This cluster is not yet finished: - ceph-version=15.2.12-0 - ceph-version=16.2.5-0 + ceph-version=15.2.13-0 + ceph-version=16.2.6-0 This cluster is finished: - ceph-version=16.2.5-0 + ceph-version=16.2.6-0 ``` -#### 3. Verify the updated cluster +#### **3. Verify the updated cluster** Verify the Ceph cluster's health using the [health verification section](#health-verification). + ## CSI Version If you have a cluster running with CSI drivers enabled and you want to configure Rook @@ -462,68 +548,36 @@ kubectl -n $ROOK_OPERATOR_NAMESPACE edit configmap rook-ceph-operator-config The default upstream images are included below, which you can change to your desired images. ```yaml -ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.3.1" -ROOK_CSI_REGISTRAR_IMAGE: "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.2.0" -ROOK_CSI_PROVISIONER_IMAGE: "k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2" -ROOK_CSI_ATTACHER_IMAGE: "k8s.gcr.io/sig-storage/csi-attacher:v3.2.1" -ROOK_CSI_RESIZER_IMAGE: "k8s.gcr.io/sig-storage/csi-resizer:v1.2.0" -ROOK_CSI_SNAPSHOTTER_IMAGE: "k8s.gcr.io/sig-storage/csi-snapshotter:v4.1.1" +ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.4.0" +ROOK_CSI_REGISTRAR_IMAGE: "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0" +ROOK_CSI_PROVISIONER_IMAGE: "k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0" +ROOK_CSI_ATTACHER_IMAGE: "k8s.gcr.io/sig-storage/csi-attacher:v3.3.0" +ROOK_CSI_RESIZER_IMAGE: "k8s.gcr.io/sig-storage/csi-resizer:v1.3.0" +ROOK_CSI_SNAPSHOTTER_IMAGE: "k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0" +CSI_VOLUME_REPLICATION_IMAGE: "quay.io/csiaddons/volumereplication-operator:v0.1.0" ``` -### Use default images +### **Use default images** If you would like Rook to use the inbuilt default upstream images, then you may simply remove all variables matching `ROOK_CSI_*_IMAGE` from the above ConfigMap and/or the operator deployment. -### Verifying updates +### **Verifying updates** -You can use the below command to see the CSI images currently being used in the cluster. 
+You can use the below command to see the CSI images currently being used in the cluster. Note that +not all images (like `volumereplication-operator`) may be present in every cluster depending on +which CSI features are enabled. ```console kubectl --namespace rook-ceph get pod -o jsonpath='{range .items[*]}{range .spec.containers[*]}{.image}{"\n"}' -l 'app in (csi-rbdplugin,csi-rbdplugin-provisioner,csi-cephfsplugin,csi-cephfsplugin-provisioner)' | sort | uniq ``` ``` -quay.io/cephcsi/cephcsi:v3.3.1 -k8s.gcr.io/sig-storage/csi-attacher:v3.2.1 -k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.2.0 -k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2 -k8s.gcr.io/sig-storage/csi-resizer:v1.2.0 -k8s.gcr.io/sig-storage/csi-snapshotter:v4.1.1 +k8s.gcr.io/sig-storage/csi-attacher:v3.3.0 +k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0 +k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0 +k8s.gcr.io/sig-storage/csi-resizer:v1.3.0 +k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0 +quay.io/cephcsi/cephcsi:v3.4.0 +quay.io/csiaddons/volumereplication-operator:v0.1.0 ``` - -## Replace lvm mode OSDs with raw mode (if you use LV-backed PVC) - -For LV-backed PVC, we recommend replacing lvm mode OSDs with raw mode OSDs. See -[common issue](ceph-common-issues.md#lvm-metadata-can-be-corrupted-with-osd-on-lv-backed-pvc). - - -## Migrate the Drive Group spec - -If your CephCluster has specified `driveGroups` in the `spec`, you must follow these instructions to -migrate the Drive Group spec before performing the upgrade from Rook v1.5.x to v1.6.x. Do not follow -these steps if no `driveGroups` are specified. - -Refer to the [CephCluster CRD Storage config](ceph-cluster-crd.md#storage-selection-settings) to -understand how to configure your nodes to host OSDs as you desire for future disks added to cluster -nodes. - -At minimum, you must migrate enough of the config so that Rook knows which nodes are already acting -as OSD hosts so that it can update the OSD Deployments. This minimal migration that allows Rook to -update existing OSD Deployments is explained below. - -1. If any of your specified Drive Groups use `host_pattern: '*'`, set `spec.storage.useAllNodes: true`. - 1. If a drive group that uses `host_pattern: '*'` also sets `data_devices:all: true`, set - `spec.storage.useAllDevices: true`, and no more config migration should be necessary. -1. If no Drive Groups use `host_pattern: '*'`, there are two basic options: - 1. Determine which nodes apply to the Drive Group, then add each nodes to the - `spec.storage.nodes` list. - 1. Determine which nodes are already hosting OSDs using the below one-liner to list the nodes. - ```sh - kubectl -n $ROOK_CLUSTER_NAMESPACE get pod --selector 'app==rook-ceph-osd' --output custom-columns='NAME:.metadata.name,NODE:.spec.nodeName,LABELS:.metadata.labels' --no-headers | grep -v ceph.rook.io/pvc | awk '{print $2}' | uniq - ``` - 1. Or, you can use labels on Kubernetes nodes and `spec.placement.osd.nodeAffinity` to tell Rook - which nodes should be running OSDs. [See also](ceph-cluster-crd.md#node-affinity). - -You may wish to reference the Rook issue where deprecation of this feature was introduced: -https://github.com/rook/rook/issues/7275. diff --git a/Documentation/development-flow.md b/Documentation/development-flow.md index 414d589e4061..7ed63d6ab374 100644 --- a/Documentation/development-flow.md +++ b/Documentation/development-flow.md @@ -38,16 +38,10 @@ cd rook ### Build +Building Rook-Ceph is simple. 
+ ```console -# build all rook storage providers make - -# build a single storage provider, where the IMAGES can be a subdirectory of the "images" folder: -# "cassandra", "ceph", or "nfs" -make IMAGES="cassandra" build - -# multiple storage providers can also be built -make IMAGES="cassandra ceph" build ``` If you want to use `podman` instead of `docker` then uninstall `docker` packages from your machine, make will automatically pick up `podman`. @@ -105,6 +99,7 @@ rook ├── cluster │   ├── charts # Helm charts │   │   └── rook-ceph +│   │   └── rook-ceph-cluster │   └── examples # Sample yaml files for Rook cluster │ ├── cmd # Binaries with main entrypoint @@ -119,8 +114,6 @@ rook │   ├── apis │   │   ├── ceph.rook.io # ceph specific specs for cluster, file, object │   │   │   ├── v1 -│   │   ├── nfs.rook.io # nfs server specific specs -│   │   │   └── v1alpha1 │   │   └── rook.io # rook.io API group of common types │   │   └── v1alpha2 │   ├── client # auto-generated strongly typed client code to access Rook APIs @@ -132,7 +125,6 @@ rook │   │   ├── ceph │   │   ├── discover │   │   ├── k8sutil -│   │   ├── nfs │   │   └── test │   ├── test │   ├── util @@ -143,7 +135,6 @@ rook    │   ├── installer # installs Rook and its supported storage providers into integration tests environments    │   └── utils    ├── integration # all test cases that will be invoked during integration testing -    ├── longhaul # longhaul tests    └── scripts # scripts for setting up integration and manual testing environments ``` @@ -157,7 +148,6 @@ To add a feature or to make a bug fix, you will need to create a branch in your For new features of significant scope and complexity, a design document is recommended before work begins on the implementation. So create a design document if: -* Adding a new storage provider * Adding a new CRD * Adding a significant feature to an existing storage provider. If the design is simple enough to describe in a github issue, you likely don't need a full design doc. @@ -215,15 +205,21 @@ Rebasing is a very powerful feature of Git. You need to understand how it works ## Submitting a Pull Request -Once you have implemented the feature or bug fix in your branch, you will open a PR to the upstream rook repo. Before opening the PR ensure you have added unit tests, are passing the integration tests, cleaned your commit history, and have rebased on the latest upstream. +Once you have implemented the feature or bug fix in your branch, you will open a Pull Request (PR) +to the [upstream Rook repository](https://github.com/rook/rook). Before opening the PR ensure you +have added unit tests and all unit tests are passing. Please clean your commit history and rebase on +the latest upstream changes. + +See [Unit Tests](#unit-tests) below for instructions on how to run unit tests. In order to open a pull request (PR) it is required to be up to date with the latest changes upstream. If other commits are pushed upstream before your PR is merged, you will also need to rebase again before it will be merged. ### Regression Testing -All pull requests must pass the unit and integration tests before they can be merged. These tests automatically -run as a part of the build process. The results of these tests along with code reviews and other criteria determine whether -your request will be accepted into the `rook/rook` repo. It is prudent to run all tests locally on your development box prior to submitting a pull request to the `rook/rook` repo. 
+All pull requests must pass the unit and integration tests before they can be merged. These tests +automatically run against every pull request as a part of Rook's continuous integration (CI) +process. The results of these tests along with code reviews and other criteria determine whether +your request will be accepted into the `rook/rook` repo. #### Unit Tests @@ -240,10 +236,38 @@ go test -coverprofile=coverage.out go tool cover -html=coverage.out -o coverage.html ``` +#### Writing unit tests + +There is no one-size-fits-all approach to unit testing, but we attempt to provide good tips for +writing unit tests for Rook below. + +Unit tests should help people reading and reviewing the code understand the intended behavior of the +code. + +Good unit tests start with easily testable code. Small chunks ("units") of code can be easily tested +for every possible input. Higher-level code units that are built from smaller, already-tested units +can more easily verify that the units are combined together correctly. + +Common cases that may need tests: +* the feature is enabled +* the feature is disabled +* the feature is only partially enabled, for every possible way it can be partially enabled +* every error that can be encountered during execution of the feature +* the feature can be disabled (including partially) after it was enabled +* the feature can be modified (including partially) after it was enabled +* if there is a slice/array involved, test length = 0, length = 1, length = 3, length == max, length > max +* an input is not specified, for each input +* an input is specified incorrectly, for each input +* a resource the code relies on doesn't exist, for each dependency + + #### Running the Integration Tests -For instructions on how to execute the end to end smoke test suite, -follow the [test instructions](https://github.com/rook/rook/blob/master/tests/README.md). +Rook's upstream continuous integration (CI) tests will run integration tests against your changes +automatically. + +You do not need to run these tests locally, but you may if you like. For instructions on how to do +so, follow the [test instructions](https://github.com/rook/rook/blob/master/tests/README.md). ### Commit structure @@ -269,16 +293,7 @@ Closes: https://github.com/rook/rook/issues/ Signed-off-by: First Name Last Name ``` -The `component` **MUST** be one of the following: -- bot -- build -- cassandra -- ceph -- ci -- core -- docs -- nfs -- test +The `component` **MUST** be in the [list checked by the CI](https://github.com/rook/rook/blob/master/.commitlintrc.json). Note: sometimes you will feel like there is not so much to say, for instance if you are fixing a typo in a text. In that case, it is acceptable to shorten the commit message. @@ -317,18 +332,7 @@ By default, you should always open a pull request against master. The flow for getting a fix into a release branch is: 1. Open a PR to merge the changes to master following the process outlined above. -2. Add the backport label to that PR such as backport-release-1.1 +2. Add the backport label to that PR such as backport-release-1.7 3. After your PR is merged to master, the mergify bot will automatically open a PR with your commits backported to the release branch 4. If there are any conflicts you will need to resolve them by pulling the branch, resolving the conflicts and force push back the branch 5. After the CI is green, the bot will automatically merge the backport PR. 
- -## Debugging operators locally - -Operators are meant to be run inside a Kubernetes cluster. However, this makes it harder to use debugging tools and slows down the developer cycle of edit-build-test since testing requires to build a container image, push to the cluster, restart the pods, get logs, etc. - -A common operator developer practice is to run the operator locally on the developer machine in order to leverage the developer tools and comfort. - -In order to support this external operator mode, rook detects if the operator is running outside of the cluster (using standard cluster env) and changes the behavior as follows: - -* Connecting to Kubernetes API will load the config from the user `~/.kube/config`. -* Instead of the default [CommandExecutor](../pkg/util/exec/exec.go) this mode uses a [TranslateCommandExecutor](../pkg/util/exec/translate_exec.go) that executes every command issued by the operator to run as a Kubernetes job inside the cluster, so that any tools that the operator needs from its image can be called. diff --git a/Documentation/flexvolume.md b/Documentation/flexvolume.md index 58af53e38a55..f45ac23bbf75 100644 --- a/Documentation/flexvolume.md +++ b/Documentation/flexvolume.md @@ -23,7 +23,6 @@ Platform-specific instructions for the following Kubernetes deployment platforms * [OpenShift](#openshift) * [OpenStack Magnum](#openstack-magnum) * [Rancher](#rancher) -* [Tectonic](#tectonic) * [Custom containerized kubelet](#custom-containerized-kubelet) * [Configuring the FlexVolume path](#configuring-the-flexvolume-path) @@ -132,13 +131,6 @@ FlexVolume path for the Rook operator. If the default path as above is used no further configuration is required, otherwise if a different path is used the Rook operator will need to be reconfigured, to do this continue with [configuring the FlexVolume path](#configuring-the-flexvolume-path) to configure Rook to use the FlexVolume path. -## Tectonic - -Follow [these instructions](tectonic.md) to configure the Flexvolume plugin for Rook on Tectonic during ContainerLinux node ignition file provisioning. -If you want to use Rook with an already provisioned Tectonic cluster, please refer to the [ContainerLinux](#containerlinux) section. - -Continue with [configuring the FlexVolume path](#configuring-the-flexvolume-path) to configure Rook to use the FlexVolume path. - ## Custom containerized kubelet Use the [most common read/write FlexVolume path](#most-common-readwrite-flexvolume-path) for the next steps. diff --git a/Documentation/helm-ceph-cluster.md b/Documentation/helm-ceph-cluster.md index be956166e269..07fc35a4a88e 100644 --- a/Documentation/helm-ceph-cluster.md +++ b/Documentation/helm-ceph-cluster.md @@ -8,7 +8,12 @@ indent: true # Ceph Cluster Helm Chart -Installs a [Ceph](https://ceph.io/) cluster on Rook using the [Helm](https://helm.sh) package manager. +Creates Rook resources to configure a [Ceph](https://ceph.io/) cluster using the [Helm](https://helm.sh) package manager. +This chart is a simple packaging of templates that will optionally create Rook resources such as: +- CephCluster, CephFilesystem, and CephObjectStore CRs +- Storage classes to expose Ceph RBD volumes, CephFS volumes, and RGW buckets +- Ingress for external access to the dashboard +- Toolbox ## Prerequisites @@ -26,11 +31,12 @@ into the same namespace as the operator or a separate namespace. Rook currently publishes builds of this chart to the `release` and `master` channels. 
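+
+One way to obtain the chart's `values.yaml` for review is to dump the packaged defaults with
+`helm show values`. The snippet below is only a sketch; it assumes the chart is consumed from the
+`rook-release` Helm repository and uses an illustrative file name:
+
+```console
+helm repo add rook-release https://charts.rook.io/release
+# write the chart's default values to a local file that can be reviewed and edited
+helm show values rook-release/rook-ceph-cluster > values-override.yaml
+```
+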
**Before installing, review the values.yaml to confirm if the default settings need to be updated.** -- If the operator was installed in a namespace other than `rook-ceph`, the namespace +* If the operator was installed in a namespace other than `rook-ceph`, the namespace must be set in the `operatorNamespace` variable. -- Set the desired settings in the `cephClusterSpec`. The [defaults](https://github.com/rook/rook/tree/{{ branchName }}/cluster/charts/rook-ceph-cluster/values.yaml) +* Set the desired settings in the `cephClusterSpec`. The [defaults](https://github.com/rook/rook/tree/{{ branchName }}/cluster/charts/rook-ceph-cluster/values.yaml) are only an example and not likely to apply to your cluster. -- The `monitoring` section should be removed from the `cephClusterSpec`, as it is specified separately in the helm settings. +* The `monitoring` section should be removed from the `cephClusterSpec`, as it is specified separately in the helm settings. +* The default values for `cephBlockPools`, `cephFileSystems`, and `CephObjectStores` will create one of each, and their corresponding storage classes. ### Release @@ -48,22 +54,66 @@ helm install --create-namespace --namespace rook-ceph rook-ceph-cluster \ The following tables lists the configurable parameters of the rook-operator chart and their default values. -| Parameter | Description | Default | -| --------------------- | -------------------------------------------------------------------- | ----------- | -| `operatorNamespace` | Namespace of the Rook Operator | `rook-ceph` | -| `configOverride` | Cluster ceph.conf override | | -| `toolbox.enabled` | Enable Ceph debugging pod deployment. See [toolbox](ceph-toolbox.md) | `false` | -| `toolbox.tolerations` | Toolbox tolerations | `[]` | -| `toolbox.affinity` | Toolbox affinity | `{}` | -| `monitoring.enabled` | Enable Prometheus integration, will also create necessary RBAC rules | `false` | -| `cephClusterSpec.*` | Cluster configuration, see below | See below | - +| Parameter | Description | Default | +| ---------------------- | -------------------------------------------------------------------- | ----------- | +| `operatorNamespace` | Namespace of the Rook Operator | `rook-ceph` | +| `configOverride` | Cluster ceph.conf override | | +| `toolbox.enabled` | Enable Ceph debugging pod deployment. See [toolbox](ceph-toolbox.md) | `false` | +| `toolbox.tolerations` | Toolbox tolerations | `[]` | +| `toolbox.affinity` | Toolbox affinity | `{}` | +| `monitoring.enabled` | Enable Prometheus integration, will also create necessary RBAC rules | `false` | +| `cephClusterSpec.*` | Cluster configuration, see below | See below | +| `ingress.dashboard` | Enable an ingress for the ceph-dashboard | `{}` | +| `cephBlockPools.[*]` | A list of CephBlockPool configurations to deploy | See below | +| `cephFileSystems.[*]` | A list of CephFileSystem configurations to deploy | See below | +| `cephObjectStores.[*]` | A list of CephObjectStore configurations to deploy | See below | ### Ceph Cluster Spec The `CephCluster` CRD takes its spec from `cephClusterSpec.*`. This is not an exhaustive list of parameters. For the full list, see the [Cluster CRD](ceph-cluster-crd.md) topic. +### Ceph Block Pools + +The `cephBlockPools` array in the values file will define a list of CephBlockPool as described in the table below. 
+ +| Parameter | Description | Default | +| ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- | +| `name` | The name of the CephBlockPool | `ceph-blockpool` | +| `spec` | The CephBlockPool spec, see the [CephBlockPool](ceph-pool-crd.md#spec) documentation. | `{}` | +| `storageClass.enabled` | Whether a storage class is deployed alongside the CephBlockPool | `true` | +| `storageClass.isDefault` | Whether the storage class will be the default storage class for PVCs. See the PersistentVolumeClaim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) documentation for details. | `true` | +| `storageClass.name` | The name of the storage class | `ceph-block` | +| `storageClass.parameters` | See [Block Storage](ceph-block.md) documentation or the helm values.yaml for suitable values | see values.yaml | +| `storageClass.reclaimPolicy` | The default [Reclaim Policy](https://kubernetes.io/docs/concepts/storage/storage-classes/#reclaim-policy) to apply to PVCs created with this storage class. | `Delete` | +| `storageClass.allowVolumeExpansion` | Whether [volume expansion](https://kubernetes.io/docs/concepts/storage/storage-classes/#allow-volume-expansion) is allowed by default. | `true` | + +### Ceph File Systems + +The `cephFileSystems` array in the values file will define a list of CephFileSystem as described in the table below. + +| Parameter | Description | Default | +| ---------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------- | +| `name` | The name of the CephFileSystem | `ceph-filesystem` | +| `spec` | The CephFileSystem spec, see the [CephFilesystem CRD](ceph-filesystem-crd.md) documentation. | see values.yaml | +| `storageClass.enabled` | Whether a storage class is deployed alongside the CephFileSystem | `true` | +| `storageClass.name` | The name of the storage class | `ceph-filesystem` | +| `storageClass.parameters` | See [Shared Filesystem](ceph-filesystem.md) documentation or the helm values.yaml for suitable values | see values.yaml | +| `storageClass.reclaimPolicy` | The default [Reclaim Policy](https://kubernetes.io/docs/concepts/storage/storage-classes/#reclaim-policy) to apply to PVCs created with this storage class. | `Delete` | + +### Ceph Object Stores + +The `cephObjectStores` array in the values file will define a list of CephObjectStore as described in the table below. + +| Parameter | Description | Default | +| ---------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------ | +| `name` | The name of the CephObjectStore | `ceph-objectstore` | +| `spec` | The CephObjectStore spec, see the [CephObjectStore CRD](ceph-object-store-crd.md) documentation. 
| see values.yaml | +| `storageClass.enabled` | Whether a storage class is deployed alongside the CephObjectStore | `true` | +| `storageClass.name` | The name of the storage class | `ceph-bucket` | +| `storageClass.parameters` | See [Object Store storage class](ceph-object-bucket-claim.md) documentation or the helm values.yaml for suitable values | see values.yaml | +| `storageClass.reclaimPolicy` | The default [Reclaim Policy](https://kubernetes.io/docs/concepts/storage/storage-classes/#reclaim-policy) to apply to PVCs created with this storage class. | `Delete` | + ### Existing Clusters If you have an existing CephCluster CR that was created without the helm chart and you want the helm diff --git a/Documentation/helm-operator.md b/Documentation/helm-operator.md index 2812a1b63114..ff816dd8bfe3 100644 --- a/Documentation/helm-operator.md +++ b/Documentation/helm-operator.md @@ -25,7 +25,7 @@ See the [Helm support matrix](https://helm.sh/docs/topics/version_skew/) for mor The Ceph Operator helm chart will install the basic components necessary to create a storage platform for your Kubernetes cluster. 1. Install the Helm chart -1. [Create a Rook cluster](ceph-quickstart.md#create-a-rook-cluster). +1. [Create a Rook cluster](quickstart.md#create-a-rook-cluster). The `helm install` command deploys rook on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. It is recommended that the rook operator be installed into the `rook-ceph` namespace (you will install your clusters into separate namespaces). @@ -50,8 +50,7 @@ To deploy from a local build from your development environment: ```console cd cluster/charts/rook-ceph -kubectl create namespace rook-ceph -helm install --namespace rook-ceph rook-ceph . +helm install --create-namespace --namespace rook-ceph rook-ceph . ``` ## Uninstalling the Chart @@ -106,8 +105,9 @@ The following tables lists the configurable parameters of the rook-operator char | `csi.provisionerPriorityClassName` | PriorityClassName to be set on csi driver provisioner pods. | | | `csi.enableOMAPGenerator` | EnableOMAP generator deploys omap sidecar in CSI provisioner pod, to enable it set it to true | `false` | | `csi.rbdFSGroupPolicy` | Policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted | ReadWriteOnceWithFSType | -| `csi.cephFSFSGroupPolicy` | Policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted | ReadWriteOnceWithFSType | +| `csi.cephFSFSGroupPolicy` | Policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted | `None` | | `csi.logLevel` | Set logging level for csi containers. Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity. | `0` | +| `csi.provisionerReplicas` | Set replicas for csi provisioner deployment. | `2` | | `csi.enableGrpcMetrics` | Enable Ceph CSI GRPC Metrics. | `false` | | `csi.enableCSIHostNetwork` | Enable Host Networking for Ceph CSI nodeplugins. | `false` | | `csi.provisionerTolerations` | Array of tolerations in YAML format which will be added to CSI provisioner deployment. | | @@ -132,14 +132,14 @@ The following tables lists the configurable parameters of the rook-operator char | `csi.rbdLivenessMetricsPort` | Ceph CSI RBD driver metrics port. | `8080` | | `csi.forceCephFSKernelClient` | Enable Ceph Kernel clients on kernel < 4.17 which support quotas for Cephfs. 
| `true` | | `csi.kubeletDirPath` | Kubelet root directory path (if the Kubelet uses a different path for the `--root-dir` flag) | `/var/lib/kubelet` | -| `csi.cephcsi.image` | Ceph CSI image. | `quay.io/cephcsi/cephcsi:v3.3.1` | +| `csi.cephcsi.image` | Ceph CSI image. | `quay.io/cephcsi/cephcsi:v3.4.0` | | `csi.rbdPluginUpdateStrategy` | CSI Rbd plugin daemonset update strategy, supported values are OnDelete and RollingUpdate. | `OnDelete` | | `csi.cephFSPluginUpdateStrategy` | CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate. | `OnDelete` | -| `csi.registrar.image` | Kubernetes CSI registrar image. | `k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.2.0` | -| `csi.resizer.image` | Kubernetes CSI resizer image. | `k8s.gcr.io/sig-storage/csi-resizer:v1.2.0` | -| `csi.provisioner.image` | Kubernetes CSI provisioner image. | `k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2` | -| `csi.snapshotter.image` | Kubernetes CSI snapshotter image. | `k8s.gcr.io/sig-storage/csi-snapshotter:v4.1.1` | -| `csi.attacher.image` | Kubernetes CSI Attacher image. | `k8s.gcr.io/sig-storage/csi-attacher:v3.2.1` | +| `csi.registrar.image` | Kubernetes CSI registrar image. | `k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0` | +| `csi.resizer.image` | Kubernetes CSI resizer image. | `k8s.gcr.io/sig-storage/csi-resizer:v1.3.0` | +| `csi.provisioner.image` | Kubernetes CSI provisioner image. | `k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0` | +| `csi.snapshotter.image` | Kubernetes CSI snapshotter image. | `k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0` | +| `csi.attacher.image` | Kubernetes CSI Attacher image. | `k8s.gcr.io/sig-storage/csi-attacher:v3.3.0` | | `csi.cephfsPodLabels` | Labels to add to the CSI CephFS Pods. | | | `csi.rbdPodLabels` | Labels to add to the CSI RBD Pods. | | | `csi.volumeReplication.enabled` | Enable Volume Replication. | `false` | diff --git a/Documentation/helm.md b/Documentation/helm.md index 404f65fa4fd8..4b919d77836e 100644 --- a/Documentation/helm.md +++ b/Documentation/helm.md @@ -3,12 +3,16 @@ title: Helm Charts weight: 10000 --- +{% include_relative branch.liquid %} + # Helm Charts -Rook has published a Helm chart for the [operator](helm-operator.md). Other Helm charts will also be potentially developed for each of the -CRDs for all Rook storage backends. +Rook has published the following Helm charts for the Ceph storage provider: -* [Rook Ceph Operator](helm-operator.md): Installs the Ceph Operator -* [Rook Ceph Cluster](helm-ceph-cluster.md): Configures resources necessary to run a Ceph cluster +* [Rook Ceph Operator](helm-operator.md): Starts the Ceph Operator, which will watch for Ceph CRs (custom resources) +* [Rook Ceph Cluster](helm-ceph-cluster.md): Creates Ceph CRs that the operator will use to configure the cluster -Contributions are welcome to create our other Helm charts! +The Helm charts are intended to simplify deployment and upgrades. +Configuring the Rook resources without Helm is also fully supported by creating the +[manifests](https://github.com/rook/rook/tree/{{ branchName }}/cluster/examples/kubernetes) +directly. 
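+
+As a rough sketch of how the two charts are typically combined (the `rook-release` repository name
+and chart options below are illustrative; see the individual chart pages above for the complete
+install steps), the operator chart is installed first and the cluster chart afterwards:
+
+```console
+helm repo add rook-release https://charts.rook.io/release
+# install the operator, which will watch for Ceph custom resources
+helm install --create-namespace --namespace rook-ceph rook-ceph rook-release/rook-ceph
+# then create the Ceph CRs that the operator will reconcile into a running cluster
+helm install --namespace rook-ceph rook-ceph-cluster rook-release/rook-ceph-cluster \
+    --set operatorNamespace=rook-ceph
+```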
diff --git a/Documentation/k8s-pre-reqs.md b/Documentation/k8s-pre-reqs.md deleted file mode 100644 index df45dc19fa87..000000000000 --- a/Documentation/k8s-pre-reqs.md +++ /dev/null @@ -1,163 +0,0 @@ ---- -title: Prerequisites -weight: 1000 ---- -{% include_relative branch.liquid %} - -# Prerequisites - -Rook can be installed on any existing Kubernetes cluster as long as it meets the minimum version -and Rook is granted the required privileges (see below for more information). If you don't have a Kubernetes cluster, -you can quickly set one up using [Minikube](#minikube), [Kubeadm](#kubeadm) or [CoreOS/Vagrant](#new-local-kubernetes-cluster-with-vagrant). - -## Minimum Version - -Kubernetes **v1.11** or higher is supported for the Ceph operator. -Kubernetes **v1.16** or higher is supported for the Cassandra and NFS operators. - -**Important** If you are using K8s 1.15 or older, you will need to create a different version of the Ceph CRDs. Create the `crds.yaml` found in the [pre-k8s-1.16](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/pre-k8s-1.16) subfolder of the example manifests. - -## Ceph Prerequisites - -See also **[Ceph Prerequisites](ceph-prerequisites.md)**. - -## Pod Security Policies - -Rook requires privileges to manage the storage in your cluster. If you have Pod Security Policies enabled -please review this section. By default, Kubernetes clusters do not have PSPs enabled so you may -be able to skip this section. - -If you are configuring Ceph on OpenShift, the Ceph walkthrough will configure the PSPs as well -when you start the operator with [operator-openshift.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/operator-openshift.yaml). - -### Cluster Role - -> **NOTE**: Cluster role configuration is only needed when you are not already `cluster-admin` in your Kubernetes cluster! - -Creating the Rook operator requires privileges for setting up RBAC. To launch the operator you need to have created your user certificate that is bound to ClusterRole `cluster-admin`. - -One simple way to achieve it is to assign your certificate with the `system:masters` group: - -```console --subj "/CN=admin/O=system:masters" -``` - -`system:masters` is a special group that is bound to `cluster-admin` ClusterRole, but it can't be easily revoked so be careful with taking that route in a production setting. -Binding individual certificate to ClusterRole `cluster-admin` is revocable by deleting the ClusterRoleBinding. - -### RBAC for PodSecurityPolicies - -If you have activated the [PodSecurityPolicy Admission Controller](https://kubernetes.io/docs/admin/admission-controllers/#podsecuritypolicy) and thus are -using [PodSecurityPolicies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/), you will require additional `(Cluster)RoleBindings` -for the different `ServiceAccounts` Rook uses to start the Rook Storage Pods. - -Security policies will differ for different backends. See Ceph's Pod Security Policies set up in -[common.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/common.yaml) -for an example of how this is done in practice. - -### PodSecurityPolicy - -You need at least one `PodSecurityPolicy` that allows privileged `Pod` execution. 
Here is an example -which should be more permissive than is needed for any backend: - -```yaml -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: privileged -spec: - fsGroup: - rule: RunAsAny - privileged: true - runAsUser: - rule: RunAsAny - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - '*' - allowedCapabilities: - - '*' - hostPID: true - # hostNetwork is required for using host networking - hostNetwork: false -``` - -**Hint**: Allowing `hostNetwork` usage is required when using `hostNetwork: true` in a Cluster `CustomResourceDefinition`! -You are then also required to allow the usage of `hostPorts` in the `PodSecurityPolicy`. The given -port range will allow all ports: - -```yaml - hostPorts: - # Ceph msgr2 port - - min: 1 - max: 65535 -``` - -## Authenticated docker registries - -If you want to use an image from authenticated docker registry (e.g. for image cache/mirror), you'll need to -add an `imagePullSecret` to all relevant service accounts. This way all pods created by the operator (for service account: -`rook-ceph-system`) or all new pods in the namespace (for service account: `default`) will have the `imagePullSecret` added -to their spec. - -The whole process is described in the [official kubernetes documentation](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account). - -### Example setup for a ceph cluster - -To get you started, here's a quick rundown for the ceph example from the [quickstart guide](/Documentation/ceph-quickstart.md). - -First, we'll create the secret for our registry as described [here](https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod): - -```console -# for namespace rook-ceph -$ kubectl -n rook-ceph create secret docker-registry my-registry-secret --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL - -# and for namespace rook-ceph (cluster) -$ kubectl -n rook-ceph create secret docker-registry my-registry-secret --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL -``` - -Next we'll add the following snippet to all relevant service accounts as described [here](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account): - -```yaml -imagePullSecrets: -- name: my-registry-secret -``` - -The service accounts are: - -* `rook-ceph-system` (namespace: `rook-ceph`): Will affect all pods created by the rook operator in the `rook-ceph` namespace. -* `default` (namespace: `rook-ceph`): Will affect most pods in the `rook-ceph` namespace. -* `rook-ceph-mgr` (namespace: `rook-ceph`): Will affect the MGR pods in the `rook-ceph` namespace. -* `rook-ceph-osd` (namespace: `rook-ceph`): Will affect the OSD pods in the `rook-ceph` namespace. - -You can do it either via e.g. `kubectl -n edit serviceaccount default` or by modifying the [`operator.yaml`](https://github.com/rook/rook/blob/master/cluster/examples/kubernetes/ceph/operator.yaml) -and [`cluster.yaml`](https://github.com/rook/rook/blob/master/cluster/examples/kubernetes/ceph/cluster.yaml) before deploying them. 
- -Since it's the same procedure for all service accounts, here is just one example: - -```console -kubectl -n rook-ceph edit serviceaccount default -``` - -```yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: default - namespace: rook-ceph -secrets: -- name: default-token-12345 -imagePullSecrets: # here are the new -- name: my-registry-secret # parts -``` - -After doing this for all service accounts all pods should be able to pull the image from your registry. - -## Bootstrapping Kubernetes - -Rook will run wherever Kubernetes is running. Here are a couple of simple environments to help you get started with Rook. - -* [Minikube](https://github.com/kubernetes/minikube/releases): A single-node cluster, simplest to get started -* [Kubeadm](https://kubernetes.io/docs/setup/independent/install-kubeadm/): One or more nodes for more comprehensive deployments diff --git a/Documentation/media/edgefs-isgw-edit.png b/Documentation/media/edgefs-isgw-edit.png deleted file mode 100644 index 8bbce92f1893..000000000000 Binary files a/Documentation/media/edgefs-isgw-edit.png and /dev/null differ diff --git a/Documentation/media/edgefs-isgw.png b/Documentation/media/edgefs-isgw.png deleted file mode 100644 index af413c7418da..000000000000 Binary files a/Documentation/media/edgefs-isgw.png and /dev/null differ diff --git a/Documentation/media/edgefs-rook.png b/Documentation/media/edgefs-rook.png deleted file mode 100644 index a62a6f9d96cf..000000000000 Binary files a/Documentation/media/edgefs-rook.png and /dev/null differ diff --git a/Documentation/media/edgefs-ui-dashboard.png b/Documentation/media/edgefs-ui-dashboard.png deleted file mode 100644 index 31c589b4e7bd..000000000000 Binary files a/Documentation/media/edgefs-ui-dashboard.png and /dev/null differ diff --git a/Documentation/media/edgefs-ui-nfs-edit.png b/Documentation/media/edgefs-ui-nfs-edit.png deleted file mode 100644 index fad41d819c07..000000000000 Binary files a/Documentation/media/edgefs-ui-nfs-edit.png and /dev/null differ diff --git a/Documentation/media/minio_demo.png b/Documentation/media/minio_demo.png deleted file mode 100644 index 64c6dfad00e4..000000000000 Binary files a/Documentation/media/minio_demo.png and /dev/null differ diff --git a/Documentation/media/nfs-webhook-deployment.png b/Documentation/media/nfs-webhook-deployment.png deleted file mode 100644 index df3cfe4b40f3..000000000000 Binary files a/Documentation/media/nfs-webhook-deployment.png and /dev/null differ diff --git a/Documentation/media/nfs-webhook-validation-flow.png b/Documentation/media/nfs-webhook-validation-flow.png deleted file mode 100644 index 67f7c8a6116a..000000000000 Binary files a/Documentation/media/nfs-webhook-validation-flow.png and /dev/null differ diff --git a/Documentation/media/rook-architecture.png b/Documentation/media/rook-architecture.png deleted file mode 100644 index 2118717cf361..000000000000 Binary files a/Documentation/media/rook-architecture.png and /dev/null differ diff --git a/Documentation/nfs-crd.md b/Documentation/nfs-crd.md deleted file mode 100644 index 72b355f087ad..000000000000 --- a/Documentation/nfs-crd.md +++ /dev/null @@ -1,150 +0,0 @@ ---- -title: NFS Server CRD -weight: 8000 ---- - -# NFS Server CRD - -NFS Server can be created and configured using the `nfsservers.nfs.rook.io` custom resource definition (CRD). -Please refer to the [user guide walk-through](nfs.md) for complete instructions. -This page will explain all the available configuration options on the NFS CRD. 
- -## Sample - -The parameters to configure the NFS CRD are demonstrated in the example below which is followed by a table that explains the parameters in more detail. - -Below is a very simple example that shows sharing a volume (which could be hostPath, cephFS, cephRBD, googlePD, EBS, etc.) using NFS, without any client or per export based configuration. - -For a `PersistentVolumeClaim` named `googlePD-claim`, which has Read/Write permissions and no squashing, the NFS CRD instance would look like the following: - -```yaml -apiVersion: nfs.rook.io/v1alpha1 -kind: NFSServer -metadata: - name: nfs-vol - namespace: rook -spec: - replicas: 1 - exports: - - name: nfs-share - server: - accessMode: ReadWrite - squash: none - persistentVolumeClaim: - claimName: googlePD-claim - # A key/value list of annotations - annotations: - # key: value -``` - -## Settings - -The table below explains in detail each configuration option that is available in the NFS CRD. - -| Parameter | Description | Default | -| ------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- | -| `replicas` | The number of NFS daemon to start | `1` | -| `annotations` | Key value pair list of annotations to add. | `[]` | -| `exports` | Parameters for creating an export | `` | -| `exports.name` | Name of the volume being shared | `` | -| `exports.server` | NFS server configuration | `` | -| `exports.server.accessMode` | Volume access modes (Reading and Writing) for the share (Valid options are `ReadOnly`, `ReadWrite` and `none`) | `ReadWrite` | -| `exports.server.squash` | This prevents root users connected remotely from having root privileges (valid options are `none`, `rootId`, `root` and `all`) | `none` | -| `exports.server.allowedClients` | Access configuration for clients that can consume the NFS volume | `` | -| `exports.server.allowedClients.name` | Name of the host/hosts | `` | -| `exports.server.allowedClients.clients` | The host or network to which the export is being shared. Valid entries for this field are host names, IP addresses, netgroups, and CIDR network addresses. | `` | -| `exports.server.allowedClients.accessMode` | Reading and Writing permissions for the client* (valid options are same as `exports.server.accessMode`) | `ReadWrite` | -| `exports.server.allowedClients.squash` | Squash option for the client* (valid options are same as `exports.server.squash`) | `none` | -| `exports.persistentVolumeClaim` | The PVC that will serve as the backing volume to be exported by the NFS server. Any PVC is allowed, such as host paths, CephFS, Ceph RBD, Google PD, Amazon EBS, etc.. | `` | -| `exports.persistentVolumeClaim.claimName` | Name of the PVC | `` | - -*note: if `exports.server.allowedClients.accessMode` and `exports.server.allowedClients.squash` options are specified, `exports.server.accessMode` and `exports.server.squash` are overridden respectively. - -Description for `volumes.allowedClients.squash` valid options are: - -| Option | Description | -| -------- | --------------------------------------------------------------------------------- | -| `none` | No user id squashing is performed | -| `rootId` | UID `0` and GID `0` are squashed to the anonymous uid and anonymous GID. | -| `root` | UID `0` and GID of any value are squashed to the anonymous uid and anonymous GID. 
| -| `all` | All users are squashed | - -The volume that needs to be exported by NFS must be attached to NFS server pod via PVC. Examples of volume that can be attached are Host Path, AWS Elastic Block Store, GCE Persistent Disk, CephFS, RBD etc. The limitations of these volumes also apply while they are shared by NFS. The limitation and other details about these volumes can be found [here](https://kubernetes.io/docs/concepts/storage/persistent-volumes/). - -## Examples - -This section contains some examples for more advanced scenarios and configuration options. - -### Single volume exported for access by multiple clients - -This example shows how to share a volume with different options for different clients accessing the share. -The EBS volume (represented by a PVC) will be exported by the NFS server for client access as `/nfs-share` (note that this PVC must already exist). - -The following client groups are allowed to access this share: - -* `group1` with IP address `172.17.0.5` will be given Read Only access with the root user squashed. -* `group2` includes both the network range of `172.17.0.5/16` and a host named `serverX`. They will all be granted Read/Write permissions with no user squash. - -```yaml -apiVersion: nfs.rook.io/v1alpha1 -kind: NFSServer -metadata: - name: nfs-vol - namespace: rook -spec: - replicas: 1 - exports: - - name: nfs-share - server: - allowedClients: - - name: group1 - clients: 172.17.0.5 - accessMode: ReadOnly - squash: root - - name: group2 - clients: - - 172.17.0.0/16 - - serverX - accessMode: ReadWrite - squash: none - persistentVolumeClaim: - claimName: ebs-claim -``` - -### Multiple volumes - -This section provides an example of how to share multiple volumes from one NFS server. -These volumes can all be different types (e.g., Google PD and Ceph RBD). -Below we will share an Amazon EBS volume as well as a CephFS volume, using differing configuration for the two: - -* The EBS volume is named `share1` and is available for all clients with Read Only access and no squash. -* The CephFS volume is named `share2` and is available for all clients with Read/Write access and no squash. - -```yaml -apiVersion: nfs.rook.io/v1alpha1 -kind: NFSServer -metadata: - name: nfs-multi-vol - namespace: rook -spec: - replicas: 1 - exports: - - name: share1 - server: - allowedClients: - - name: ebs-host - clients: all - accessMode: ReadOnly - squash: none - persistentVolumeClaim: - claimName: ebs-claim - - name: share2 - server: - allowedClients: - - name: ceph-host - clients: all - accessMode: ReadWrite - squash: none - persistentVolumeClaim: - claimName: cephfs-claim -``` diff --git a/Documentation/nfs.md b/Documentation/nfs.md deleted file mode 100644 index e8227e82392f..000000000000 --- a/Documentation/nfs.md +++ /dev/null @@ -1,602 +0,0 @@ ---- -title: Network Filesystem (NFS) -weight: 800 -indent: true ---- -{% include_relative branch.liquid %} - -# Network Filesystem (NFS) - -NFS allows remote hosts to mount filesystems over a network and interact with those filesystems as though they are mounted locally. This enables system administrators to consolidate resources onto centralized servers on the network. - -## Prerequisites - -1. A Kubernetes cluster (v1.16 or higher) is necessary to run the Rook NFS operator. To make sure you have a Kubernetes cluster that is ready for `Rook`, you can [follow these instructions](k8s-pre-reqs.md). -2. 
The desired volume to export needs to be attached to the NFS server pod via a [PVC](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims). -Any type of PVC can be attached and exported, such as Host Path, AWS Elastic Block Store, GCP Persistent Disk, CephFS, Ceph RBD, etc. -The limitations of these volumes also apply while they are shared by NFS. -You can read further about the details and limitations of these volumes in the [Kubernetes docs](https://kubernetes.io/docs/concepts/storage/persistent-volumes/). -3. NFS client packages must be installed on all nodes where Kubernetes might run pods with NFS mounted. Install `nfs-utils` on CentOS nodes or `nfs-common` on Ubuntu nodes. - -## Deploy NFS Operator - -First deploy the Rook NFS operator using the following commands: - -```console -$ git clone --single-branch --branch {{ branchName }} https://github.com/rook/rook.git -cd rook/cluster/examples/kubernetes/nfs -kubectl create -f crds.yaml -kubectl create -f operator.yaml -``` - -You can check if the operator is up and running with: - -```console -kubectl -n rook-nfs-system get pod -``` - ->``` ->NAME READY STATUS RESTARTS AGE ->rook-nfs-operator-879f5bf8b-gnwht 1/1 Running 0 29m ->``` - -## Deploy NFS Admission Webhook (Optional) - -Admission webhooks are HTTP callbacks that receive admission requests to the API server. Two types of admission webhooks is validating admission webhook and mutating admission webhook. NFS Operator support validating admission webhook which validate the NFSServer object sent to the API server before stored in the etcd (persisted). - -To enable admission webhook on NFS such as validating admission webhook, you need to do as following: - -First, ensure that `cert-manager` is installed. If it is not installed yet, you can install it as described in the `cert-manager` [installation](https://cert-manager.io/docs/installation/kubernetes/) documentation. Alternatively, you can simply just run the single command below: - -```console -kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v0.15.1/cert-manager.yaml -``` - -This will easily get the latest version (`v0.15.1`) of `cert-manager` installed. After that completes, make sure the cert-manager component deployed properly and is in the `Running` status: - -```console -kubectl get -n cert-manager pod -``` - ->``` ->NAME READY STATUS RESTARTS AGE ->cert-manager-7747db9d88-jmw2f 1/1 Running 0 2m1s ->cert-manager-cainjector-87c85c6ff-dhtl8 1/1 Running 0 2m1s ->cert-manager-webhook-64dc9fff44-5g565 1/1 Running 0 2m1s ->``` - -Once `cert-manager` is running, you can now deploy the NFS webhook: - -```console -kubectl create -f webhook.yaml -``` - -Verify the webhook is up and running: - -```console -kubectl -n rook-nfs-system get pod -``` - ->``` ->NAME READY STATUS RESTARTS AGE ->rook-nfs-operator-78d86bf969-k7lqp 1/1 Running 0 102s ->rook-nfs-webhook-74749cbd46-6jw2w 1/1 Running 0 102s ->``` - -## Create Openshift Security Context Constraints (Optional) - -On OpenShift clusters, we will need to create some additional security context constraints. If you are **not** running in OpenShift you can skip this and go to the [next section](#create-and-initialize-nfs-server). - -To create the security context constraints for nfs-server pods, we can use the following yaml, which is also found in `scc.yaml` under `/cluster/examples/kubernetes/nfs`. 
- -> *NOTE: Older versions of OpenShift may require ```apiVersion: v1```* - -```yaml -kind: SecurityContextConstraints -apiVersion: security.openshift.io/v1 -metadata: - name: rook-nfs -allowHostDirVolumePlugin: true -allowHostIPC: false -allowHostNetwork: false -allowHostPID: false -allowHostPorts: false -allowPrivilegedContainer: false -allowedCapabilities: -- SYS_ADMIN -- DAC_READ_SEARCH -defaultAddCapabilities: null -fsGroup: - type: MustRunAs -priority: null -readOnlyRootFilesystem: false -requiredDropCapabilities: -- KILL -- MKNOD -- SYS_CHROOT -runAsUser: - type: RunAsAny -seLinuxContext: - type: MustRunAs -supplementalGroups: - type: RunAsAny -volumes: -- configMap -- downwardAPI -- emptyDir -- persistentVolumeClaim -- secret -users: - - system:serviceaccount:rook-nfs:rook-nfs-server -``` - -You can create scc with following command: - -```console -oc create -f scc.yaml -``` - -## Create Pod Security Policies (Recommended) - -We recommend you to create Pod Security Policies as well - -```yaml -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: rook-nfs-policy -spec: - privileged: true - fsGroup: - rule: RunAsAny - allowedCapabilities: - - DAC_READ_SEARCH - - SYS_RESOURCE - runAsUser: - rule: RunAsAny - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - configMap - - downwardAPI - - emptyDir - - persistentVolumeClaim - - secret - - hostPath -``` - -Save this file with name `psp.yaml` and create with following command: - -```console -kubectl create -f psp.yaml -``` - -## Create and Initialize NFS Server - -Now that the operator is running, we can create an instance of a NFS server by creating an instance of the `nfsservers.nfs.rook.io` resource. -The various fields and options of the NFS server resource can be used to configure the server and its volumes to export. -Full details of the available configuration options can be found in the [NFS CRD documentation](nfs-crd.md). 
- -Before we create NFS Server we need to create `ServiceAccount` and `RBAC` rules - -```yaml ---- -apiVersion: v1 -kind: Namespace -metadata: - name: rook-nfs ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-nfs-server - namespace: rook-nfs ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-nfs-provisioner-runner -rules: - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "create", "delete"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["create", "update", "patch"] - - apiGroups: [""] - resources: ["services", "endpoints"] - verbs: ["get"] - - apiGroups: ["policy"] - resources: ["podsecuritypolicies"] - resourceNames: ["rook-nfs-policy"] - verbs: ["use"] - - apiGroups: [""] - resources: ["endpoints"] - verbs: ["get", "list", "watch", "create", "update", "patch"] - - apiGroups: - - nfs.rook.io - resources: - - "*" - verbs: - - "*" ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-nfs-provisioner-runner -subjects: - - kind: ServiceAccount - name: rook-nfs-server - # replace with namespace where provisioner is deployed - namespace: rook-nfs -roleRef: - kind: ClusterRole - name: rook-nfs-provisioner-runner - apiGroup: rbac.authorization.k8s.io -``` - -Save this file with name `rbac.yaml` and create with following command: - -```console -kubectl create -f rbac.yaml -``` - -This guide has 3 main examples that demonstrate exporting volumes with a NFS server: - -1. [Default StorageClass example](#default-storageclass-example) -1. [XFS StorageClass example](#xfs-storageclass-example) -1. [Rook Ceph volume example](#rook-ceph-volume-example) - -### Default StorageClass example - -This first example will walk through creating a NFS server instance that exports storage that is backed by the default `StorageClass` for the environment you happen to be running in. -In some environments, this could be a host path, in others it could be a cloud provider virtual disk. -Either way, this example requires a default `StorageClass` to exist. - -Start by saving the below NFS CRD instance definition to a file called `nfs.yaml`: - -```yaml ---- -# A default storageclass must be present -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: nfs-default-claim - namespace: rook-nfs -spec: - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi ---- -apiVersion: nfs.rook.io/v1alpha1 -kind: NFSServer -metadata: - name: rook-nfs - namespace: rook-nfs -spec: - replicas: 1 - exports: - - name: share1 - server: - accessMode: ReadWrite - squash: "none" - # A Persistent Volume Claim must be created before creating NFS CRD instance. - persistentVolumeClaim: - claimName: nfs-default-claim - # A key/value list of annotations - annotations: - rook: nfs -``` - -With the `nfs.yaml` file saved, now create the NFS server as shown: - -```console -kubectl create -f nfs.yaml -``` - -### XFS StorageClass example - -Rook NFS support disk quota through `xfs_quota`. So if you need specify disk quota for your volumes you can follow this example. - -In this example, we will use an underlying volume mounted as `xfs` with `prjquota` option. Before you can create that underlying volume, you need to create `StorageClass` with `xfs` filesystem and `prjquota` mountOptions. 
Many distributed storage providers for Kubernetes support `xfs` filesystem. Typically by defining `fsType: xfs` or `fs: xfs` in storageClass parameters. But actually how to specify storage-class filesystem type is depend on the storage providers it self. You can see https://kubernetes.io/docs/concepts/storage/storage-classes/ for more details. - -Here is example `StorageClass` for GCE PD and AWS EBS - -- GCE PD - -```yaml -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: standard-xfs -parameters: - type: pd-standard - fsType: xfs -mountOptions: - - prjquota -provisioner: kubernetes.io/gce-pd -reclaimPolicy: Delete -volumeBindingMode: Immediate -allowVolumeExpansion: true -``` - -- AWS EBS - -```yaml -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: standard-xfs -provisioner: kubernetes.io/aws-ebs -parameters: - type: io1 - iopsPerGB: "10" - fsType: xfs -mountOptions: - - prjquota -reclaimPolicy: Delete -volumeBindingMode: Immediate -``` - -Once you already have `StorageClass` with `xfs` filesystem and `prjquota` mountOptions you can create NFS server instance with the following example. - -```yaml ---- -# A storage class with name standard-xfs must be present. -# The storage class must be has xfs filesystem type and prjquota mountOptions. -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: nfs-xfs-claim - namespace: rook-nfs -spec: - storageClassName: "standard-xfs" - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi ---- -apiVersion: nfs.rook.io/v1alpha1 -kind: NFSServer -metadata: - name: rook-nfs - namespace: rook-nfs -spec: - replicas: 1 - exports: - - name: share1 - server: - accessMode: ReadWrite - squash: "none" - # A Persistent Volume Claim must be created before creating NFS CRD instance. - persistentVolumeClaim: - claimName: nfs-xfs-claim - # A key/value list of annotations - annotations: - rook: nfs -``` - -Save this PVC and NFS Server instance as `nfs-xfs.yaml` and create with following command. - -```console -kubectl create -f nfs-xfs.yaml -``` - -### Rook Ceph volume example - -In this alternative example, we will use a different underlying volume as an export for the NFS server. -These steps will walk us through exporting a Ceph RBD block volume so that clients can access it across the network. - -First, you have to [follow these instructions](ceph-quickstart.md) to deploy a sample Rook Ceph cluster that can be attached to the NFS server pod for sharing. -After the Rook Ceph cluster is up and running, we can create proceed with creating the NFS server. - -Save this PVC and NFS Server instance as `nfs-ceph.yaml`: - -```yaml ---- -# A rook ceph cluster must be running -# Create a rook ceph cluster using examples in rook/cluster/examples/kubernetes/ceph -# Refer to https://rook.io/docs/rook/master/ceph-quickstart.html for a quick rook cluster setup -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: nfs-ceph-claim - namespace: rook-nfs -spec: - storageClassName: rook-ceph-block - accessModes: - - ReadWriteMany - resources: - requests: - storage: 2Gi ---- -apiVersion: nfs.rook.io/v1alpha1 -kind: NFSServer -metadata: - name: rook-nfs - namespace: rook-nfs -spec: - replicas: 1 - exports: - - name: share1 - server: - accessMode: ReadWrite - squash: "none" - # A Persistent Volume Claim must be created before creating NFS CRD instance. 
- # Create a Ceph cluster for using this example - # Create a ceph PVC after creating the rook ceph cluster using ceph-pvc.yaml - persistentVolumeClaim: - claimName: nfs-ceph-claim - # A key/value list of annotations - annotations: - rook: nfs -``` - -Create the NFS server instance that you saved in `nfs-ceph.yaml`: - -```console -kubectl create -f nfs-ceph.yaml -``` - -### Verify NFS Server - -We can verify that a Kubernetes object has been created that represents our new NFS server and its export with the command below. - -```console -kubectl -n rook-nfs get nfsservers.nfs.rook.io -``` - ->``` ->NAME AGE STATE ->rook-nfs 32s Running ->``` - -Verify that the NFS server pod is up and running: - -```console -kubectl -n rook-nfs get pod -l app=rook-nfs -``` - ->``` ->NAME READY STATUS RESTARTS AGE ->rook-nfs-0 1/1 Running 0 2m ->``` - -If the NFS server pod is in the `Running` state, then we have successfully created an exported NFS share that clients can start to access over the network. - - -## Accessing the Export - -Since Rook version v1.0, Rook supports dynamic provisioning of NFS. -This example will be showing how dynamic provisioning feature can be used for nfs. - -Once the NFS Operator and an instance of NFSServer is deployed. A storageclass similar to below example has to be created to dynamically provisioning volumes. - -```yaml -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - labels: - app: rook-nfs - name: rook-nfs-share1 -parameters: - exportName: share1 - nfsServerName: rook-nfs - nfsServerNamespace: rook-nfs -provisioner: nfs.rook.io/rook-nfs-provisioner -reclaimPolicy: Delete -volumeBindingMode: Immediate -``` - -You can save it as a file, eg: called `sc.yaml` Then create storageclass with following command. - -```console -kubectl create -f sc.yaml -``` - -> **NOTE**: The StorageClass need to have the following 3 parameters passed. -> -1. `exportName`: It tells the provisioner which export to use for provisioning the volumes. -2. `nfsServerName`: It is the name of the NFSServer instance. -3. `nfsServerNamespace`: It namespace where the NFSServer instance is running. - -Once the above storageclass has been created, you can create a PV claim referencing the storageclass as shown in the example given below. - -```yaml -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: rook-nfs-pv-claim -spec: - storageClassName: "rook-nfs-share1" - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Mi -``` - -You can also save it as a file, eg: called `pvc.yaml` Then create PV claim with following command. - -```console -kubectl create -f pvc.yaml -``` - -## Consuming the Export - -Now we can consume the PV that we just created by creating an example web server app that uses the above `PersistentVolumeClaim` to claim the exported volume. -There are 2 pods that comprise this example: - -1. A web server pod that will read and display the contents of the NFS share -1. 
A writer pod that will write random data to the NFS share so the website will continually update - -Start both the busybox pod (writer) and the web server from the `cluster/examples/kubernetes/nfs` folder: - -```console -kubectl create -f busybox-rc.yaml -kubectl create -f web-rc.yaml -``` - -Let's confirm that the expected busybox writer pod and web server pod are **all** up and in the `Running` state: - -```console -kubectl get pod -l app=nfs-demo -``` - -In order to be able to reach the web server over the network, let's create a service for it: - -```console -kubectl create -f web-service.yaml -``` - -We can then use the busybox writer pod we launched before to check that nginx is serving the data appropriately. -In the below 1-liner command, we use `kubectl exec` to run a command in the busybox writer pod that uses `wget` to retrieve the web page that the web server pod is hosting. As the busybox writer pod continues to write a new timestamp, we should see the returned output also update every ~10 seconds or so. - -```console -$ echo; kubectl exec $(kubectl get pod -l app=nfs-demo,role=busybox -o jsonpath='{.items[0].metadata.name}') -- wget -qO- http://$(kubectl get services nfs-web -o jsonpath='{.spec.clusterIP}'); echo -``` - ->``` ->Thu Oct 22 19:28:55 UTC 2015 ->nfs-busybox-w3s4t ->``` - -## Teardown - -To clean up all resources associated with this walk-through, you can run the commands below. - -```console -kubectl delete -f web-service.yaml -kubectl delete -f web-rc.yaml -kubectl delete -f busybox-rc.yaml -kubectl delete -f pvc.yaml -kubectl delete -f pv.yaml -kubectl delete -f nfs.yaml -kubectl delete -f nfs-xfs.yaml -kubectl delete -f nfs-ceph.yaml -kubectl delete -f rbac.yaml -kubectl delete -f psp.yaml -kubectl delete -f scc.yaml # if deployed -kubectl delete -f operator.yaml -kubectl delete -f webhook.yaml # if deployed -kubectl delete -f crds.yaml -``` - -## Troubleshooting - -If the NFS server pod does not come up, the first step would be to examine the NFS operator's logs: - -```console -kubectl -n rook-nfs-system logs -l app=rook-nfs-operator -``` diff --git a/Documentation/pod-security-policies.md b/Documentation/pod-security-policies.md new file mode 100644 index 000000000000..9062ab2a2a34 --- /dev/null +++ b/Documentation/pod-security-policies.md @@ -0,0 +1,67 @@ +--- +title: Pod Security Policies +weight: 1300 +indent: true +--- +{% include_relative branch.liquid %} + +## Pod Security Policies + +Rook requires privileges to manage the storage in your cluster. If you have Pod Security Policies enabled +please review this document. By default, Kubernetes clusters do not have PSPs enabled so you may +be able to skip this document. + +If you are configuring Ceph on OpenShift, the Ceph walkthrough will configure the PSPs as well +when you start the operator with [operator-openshift.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/operator-openshift.yaml). + +Creating the Rook operator requires privileges for setting up RBAC. To launch the operator you need to have created your user certificate that is bound to ClusterRole `cluster-admin`. 
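+
+For example, one way to grant that privilege is a `ClusterRoleBinding` similar to the sketch below. The binding name and `<your-admin-user>` are illustrative placeholders, not part of the Rook manifests; substitute the user your cluster recognizes:
+
+```console
+# Bind an (illustrative) user to the built-in cluster-admin ClusterRole
+kubectl create clusterrolebinding rook-admin-binding \
+  --clusterrole=cluster-admin \
+  --user=<your-admin-user>
+```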
+ +### RBAC for PodSecurityPolicies + +If you have activated the [PodSecurityPolicy Admission Controller](https://kubernetes.io/docs/admin/admission-controllers/#podsecuritypolicy) and thus are +using [PodSecurityPolicies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/), you will require additional `(Cluster)RoleBindings` +for the different `ServiceAccounts` Rook uses to start the Rook Storage Pods. + +Security policies will differ for different backends. See Ceph's Pod Security Policies set up in +[common.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/common.yaml) +for an example of how this is done in practice. + +### PodSecurityPolicy + +You need at least one `PodSecurityPolicy` that allows privileged `Pod` execution. Here is an example +which should be more permissive than is needed for any backend: + +```yaml +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: privileged +spec: + fsGroup: + rule: RunAsAny + privileged: true + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - '*' + allowedCapabilities: + - '*' + hostPID: true + # hostNetwork is required for using host networking + hostNetwork: false +``` + +**Hint**: Allowing `hostNetwork` usage is required when using `hostNetwork: true` in a Cluster `CustomResourceDefinition`! +You are then also required to allow the usage of `hostPorts` in the `PodSecurityPolicy`. The given +port range will allow all ports: + +```yaml + hostPorts: + # Ceph msgr2 port + - min: 1 + max: 65535 +``` diff --git a/Documentation/ceph-prerequisites.md b/Documentation/pre-reqs.md similarity index 59% rename from Documentation/ceph-prerequisites.md rename to Documentation/pre-reqs.md index 5e9819aa6deb..a991a3452553 100644 --- a/Documentation/ceph-prerequisites.md +++ b/Documentation/pre-reqs.md @@ -1,18 +1,43 @@ --- title: Prerequisites -weight: 2010 -indent: true +weight: 1000 --- +{% include_relative branch.liquid %} -# Ceph Prerequisites +# Prerequisites -To make sure you have a Kubernetes cluster that is ready for `Rook`, review the general [Rook Prerequisites](k8s-pre-reqs.md). +Rook can be installed on any existing Kubernetes cluster as long as it meets the minimum version +and Rook is granted the required privileges (see below for more information). + +## Minimum Version + +Kubernetes **v1.11** or higher is supported for the Ceph operator. + +**Important** If you are using K8s 1.15 or older, you will need to create a different version of the Ceph CRDs. Create the `crds.yaml` found in the [pre-k8s-1.16](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/pre-k8s-1.16) subfolder of the example manifests. + +## Ceph Prerequisites In order to configure the Ceph storage cluster, at least one of these local storage options are required: - Raw devices (no partitions or formatted filesystems) - Raw partitions (no formatted filesystem) - PVs available from a storage class in `block` mode +You can confirm whether your partitions or devices are formatted with filesystems with the following command. + +```console +lsblk -f +``` +>``` +>NAME FSTYPE LABEL UUID MOUNTPOINT +>vda +>└─vda1 LVM2_member >eSO50t-GkUV-YKTH-WsGq-hNJY-eKNf-3i07IB +> ├─ubuntu--vg-root ext4 c2366f76-6e21-4f10-a8f3-6776212e2fe4 / +> └─ubuntu--vg-swap_1 swap 9492a3dc-ad75-47cd-9596-678e8cf17ff9 [SWAP] +>vdb +>``` + +If the `FSTYPE` field is not empty, there is a filesystem on top of the corresponding device. 
In this example, you can use `vdb` for Ceph and can't use `vda` or its partitions. + ## LVM package Ceph OSDs have a dependency on LVM in the following scenarios: @@ -50,17 +75,6 @@ runcmd: - [ vgchange, -ay ] ``` -## Ceph Flexvolume Configuration - -**NOTE** This configuration is only needed when using the FlexVolume driver (required for Kubernetes 1.12 or earlier). The Ceph-CSI RBD driver or the Ceph-CSI CephFS driver are recommended for Kubernetes 1.13 and newer, making FlexVolume configuration redundant. - -If you want to configure volumes with the Flex driver instead of CSI, the Rook agent requires setup as a Flex volume plugin to manage the storage attachments in your cluster. -See the [Flex Volume Configuration](flexvolume.md) topic to configure your Kubernetes deployment to load the Rook volume plugin. - -### Extra agent mounts - -On certain distributions it may be necessary to mount additional directories into the agent container. That is what the environment variable `AGENT_MOUNTS` is for. Also see the documentation in [helm-operator](helm-operator.md) on the parameter `agent.mounts`. The format of the variable content should be `mountname1=/host/path1:/container/path1,mountname2=/host/path2:/container/path2`. - ## Kernel ### RBD @@ -77,7 +91,3 @@ or choose a different Linux distribution. If you will be creating volumes from a Ceph shared file system (CephFS), the recommended minimum kernel version is **4.17**. If you have a kernel version less than 4.17, the requested PVC sizes will not be enforced. Storage quotas will only be enforced on newer kernels. - -## Kernel modules directory configuration - -Normally, on Linux, kernel modules can be found in `/lib/modules`. However, there are some distributions that put them elsewhere. In that case the environment variable `LIB_MODULES_DIR_PATH` can be used to override the default. Also see the documentation in [helm-operator](helm-operator.md) on the parameter `agent.libModulesDirPath`. One notable distribution where this setting is useful would be [NixOS](https://nixos.org). diff --git a/Documentation/quickstart.md b/Documentation/quickstart.md index c04e1d484c28..51910b1ff496 100644 --- a/Documentation/quickstart.md +++ b/Documentation/quickstart.md @@ -1,21 +1,174 @@ --- title: Quickstart -weight: 200 +weight: 300 --- -# Quickstart Guides +{% include_relative branch.liquid %} -Welcome to Rook! We hope you have a great experience installing the Rook **cloud-native storage orchestrator** platform to enable highly available, durable storage -in your Kubernetes cluster. +# Ceph Quickstart + +Welcome to Rook! We hope you have a great experience installing the Rook **cloud-native storage orchestrator** platform to enable highly available, durable Ceph storage in your Kubernetes cluster. If you have any questions along the way, please don't hesitate to ask us in our [Slack channel](https://rook-io.slack.com). You can sign up for our Slack [here](https://slack.rook.io). -Rook provides a growing number of storage providers to a Kubernetes cluster, each with its own operator to deploy and manage the resources for the storage provider. +This guide will walk you through the basic setup of a Ceph cluster and enable you to consume block, object, and file storage +from other pods running in your cluster. + +## Minimum Version + +Kubernetes **v1.11** or higher is supported by Rook. + +**Important** If you are using K8s 1.15 or older, you will need to create a different version of the Rook CRDs. 
Create the `crds.yaml` found in the [pre-k8s-1.16](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/pre-k8s-1.16) subfolder of the example manifests. + +## Prerequisites + +To make sure you have a Kubernetes cluster that is ready for `Rook`, you can [follow these instructions](pre-reqs.md). + +In order to configure the Ceph storage cluster, at least one of these local storage options are required: +- Raw devices (no partitions or formatted filesystems) + - This requires `lvm2` to be installed on the host. + To avoid this dependency, you can create a single full-disk partition on the disk (see below) +- Raw partitions (no formatted filesystem) +- Persistent Volumes available from a storage class in `block` mode + +## TL;DR + +A simple Rook cluster can be created with the following kubectl commands and [example manifests](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph). + +```console +$ git clone --single-branch --branch v1.7.8 https://github.com/rook/rook.git +cd rook/cluster/examples/kubernetes/ceph +kubectl create -f crds.yaml -f common.yaml -f operator.yaml +kubectl create -f cluster.yaml +``` + +After the cluster is running, you can create [block, object, or file](#storage) storage to be consumed by other applications in your cluster. + +## Deploy the Rook Operator + +The first step is to deploy the Rook operator. Check that you are using the [example yaml files](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph) that correspond to your release of Rook. For more options, see the [examples documentation](ceph-examples.md). + +```console +cd cluster/examples/kubernetes/ceph +kubectl create -f crds.yaml -f common.yaml -f operator.yaml + +# verify the rook-ceph-operator is in the `Running` state before proceeding +kubectl -n rook-ceph get pod +``` + +You can also deploy the operator with the [Rook Helm Chart](helm-operator.md). + +Before you start the operator in production, there are some settings that you may want to consider: +1. If you are using kubernetes v1.15 or older you need to create CRDs found here `/cluster/examples/kubernetes/ceph/pre-k8s-1.16/crd.yaml`. + The apiextension v1beta1 version of CustomResourceDefinition was deprecated in Kubernetes v1.16. +2. Consider if you want to enable certain Rook features that are disabled by default. See the [operator.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/operator.yaml) for these and other advanced settings. + 1. Device discovery: Rook will watch for new devices to configure if the `ROOK_ENABLE_DISCOVERY_DAEMON` setting is enabled, commonly used in bare metal clusters. + 2. Flex driver: The flex driver is deprecated in favor of the CSI driver, but can still be enabled with the `ROOK_ENABLE_FLEX_DRIVER` setting. + 3. Node affinity and tolerations: The CSI driver by default will run on any node in the cluster. To configure the CSI driver affinity, several settings are available. + +If you wish to deploy into a namespace other than the default `rook-ceph`, see the +[Ceph advanced configuration section](ceph-advanced-configuration.md#using-alternate-namespaces) on the topic. + +## Cluster Environments + +The Rook documentation is focused around starting Rook in a production environment. Examples are also +provided to relax some settings for test environments. 
When creating the cluster later in this guide, consider these example cluster manifests: +- [cluster.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/cluster.yaml): Cluster settings for a production cluster running on bare metal. Requires at least three worker nodes. +- [cluster-on-pvc.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/cluster-on-pvc.yaml): Cluster settings for a production cluster running in a dynamic cloud environment. +- [cluster-test.yaml](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/cluster-test.yaml): Cluster settings for a test environment such as minikube. + +See the [Ceph examples](ceph-examples.md) for more details. + +## Create a Ceph Cluster + +Now that the Rook operator is running we can create the Ceph cluster. For the cluster to survive reboots, +make sure you set the `dataDirHostPath` property that is valid for your hosts. For more settings, see the documentation on [configuring the cluster](ceph-cluster-crd.md). + +Create the cluster: + +```console +kubectl create -f cluster.yaml +``` + +Use `kubectl` to list pods in the `rook-ceph` namespace. You should be able to see the following pods once they are all running. +The number of osd pods will depend on the number of nodes in the cluster and the number of devices configured. +If you did not modify the `cluster.yaml` above, it is expected that one OSD will be created per node. + +> If the `rook-ceph-mon`, `rook-ceph-mgr`, or `rook-ceph-osd` pods are not created, please refer to the +> [Ceph common issues](ceph-common-issues.md) for more details and potential solutions. + +```console +kubectl -n rook-ceph get pod +``` + +>``` +>NAME READY STATUS RESTARTS AGE +>csi-cephfsplugin-provisioner-d77bb49c6-n5tgs 5/5 Running 0 140s +>csi-cephfsplugin-provisioner-d77bb49c6-v9rvn 5/5 Running 0 140s +>csi-cephfsplugin-rthrp 3/3 Running 0 140s +>csi-rbdplugin-hbsm7 3/3 Running 0 140s +>csi-rbdplugin-provisioner-5b5cd64fd-nvk6c 6/6 Running 0 140s +>csi-rbdplugin-provisioner-5b5cd64fd-q7bxl 6/6 Running 0 140s +>rook-ceph-crashcollector-minikube-5b57b7c5d4-hfldl 1/1 Running 0 105s +>rook-ceph-mgr-a-64cd7cdf54-j8b5p 1/1 Running 0 77s +>rook-ceph-mon-a-694bb7987d-fp9w7 1/1 Running 0 105s +>rook-ceph-mon-b-856fdd5cb9-5h2qk 1/1 Running 0 94s +>rook-ceph-mon-c-57545897fc-j576h 1/1 Running 0 85s +>rook-ceph-operator-85f5b946bd-s8grz 1/1 Running 0 92m +>rook-ceph-osd-0-6bb747b6c5-lnvb6 1/1 Running 0 23s +>rook-ceph-osd-1-7f67f9646d-44p7v 1/1 Running 0 24s +>rook-ceph-osd-2-6cd4b776ff-v4d68 1/1 Running 0 25s +>rook-ceph-osd-prepare-node1-vx2rz 0/2 Completed 0 60s +>rook-ceph-osd-prepare-node2-ab3fd 0/2 Completed 0 60s +>rook-ceph-osd-prepare-node3-w4xyz 0/2 Completed 0 60s +>``` + +To verify that the cluster is in a healthy state, connect to the [Rook toolbox](ceph-toolbox.md) and run the +`ceph status` command. + +* All mons should be in quorum +* A mgr should be active +* At least one OSD should be active +* If the health is not `HEALTH_OK`, the warnings or errors should be investigated + +```console +ceph status +``` +>``` +> cluster: +> id: a0452c76-30d9-4c1a-a948-5d8405f19a7c +> health: HEALTH_OK +> +> services: +> mon: 3 daemons, quorum a,b,c (age 3m) +> mgr: a(active, since 2m) +> osd: 3 osds: 3 up (since 1m), 3 in (since 1m) +>... +>``` + +If the cluster is not healthy, please refer to the [Ceph common issues](ceph-common-issues.md) for more details and potential solutions. 
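+
+As a quick sketch, if you have already deployed the toolbox from the [toolbox documentation](ceph-toolbox.md), you can run the check non-interactively as shown below. This assumes the toolbox Deployment uses its default name (`rook-ceph-tools`) in the `rook-ceph` namespace; adjust the names if your setup differs:
+
+```console
+# Run the status check through the toolbox deployment (assumes default names)
+kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph status
+```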
+ +## Storage + +For a walkthrough of the three types of storage exposed by Rook, see the guides for: + +* **[Block](ceph-block.md)**: Create block storage to be consumed by a pod (RWO) +* **[Shared Filesystem](ceph-filesystem.md)**: Create a filesystem to be shared across multiple pods (RWX) +* **[Object](ceph-object.md)**: Create an object store that is accessible inside or outside the Kubernetes cluster + +## Ceph Dashboard + +Ceph has a dashboard in which you can view the status of your cluster. Please see the [dashboard guide](ceph-dashboard.md) for more details. + +## Tools + +Create a toolbox pod for full access to a ceph admin client for debugging and troubleshooting your Rook cluster. Please see the [toolbox documentation](ceph-toolbox.md) for setup and usage information. Also see our [advanced configuration](ceph-advanced-configuration.md) document for helpful maintenance and tuning examples. + +## Monitoring + +Each Rook cluster has some built in metrics collectors/exporters for monitoring with [Prometheus](https://prometheus.io/). +To learn how to set up monitoring for your Rook cluster, you can follow the steps in the [monitoring guide](./ceph-monitoring.md). -**Follow these guides to get started with each provider**: +## Teardown -| Storage Provider | Status | Description | -| -------------------------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ | -| [Ceph](ceph-quickstart.md) | Stable / V1 | Ceph is a highly scalable distributed storage solution for block storage, object storage, and shared filesystems with years of production deployments. | -| [Cassandra](cassandra.md) | Alpha | Cassandra is a highly available NoSQL database featuring lightning fast performance, tunable consistency and massive scalability. | -| [NFS](nfs.md) | Alpha | NFS allows remote hosts to mount filesystems over a network and interact with those filesystems as though they are mounted locally. | +When you are done with the test cluster, see [these instructions](ceph-teardown.md) to clean up the cluster. diff --git a/Documentation/rbd-mirroring.md b/Documentation/rbd-mirroring.md new file mode 100644 index 000000000000..ba4d67dce8a3 --- /dev/null +++ b/Documentation/rbd-mirroring.md @@ -0,0 +1,424 @@ +--- +title: RBD Mirroring +weight: 3242 +indent: true +--- + +# RBD Mirroring +## Disaster Recovery + +Disaster recovery (DR) is an organization's ability to react to and recover from an incident that negatively affects business operations. +This plan comprises strategies for minimizing the consequences of a disaster, so an organization can continue to operate – or quickly resume the key operations. +Thus, disaster recovery is one of the aspects of [business continuity](https://en.wikipedia.org/wiki/Business_continuity_planning). +One of the solutions, to achieve the same, is [RBD mirroring](https://docs.ceph.com/en/latest/rbd/rbd-mirroring/). + +## RBD Mirroring + +[RBD mirroring](https://docs.ceph.com/en/latest/rbd/rbd-mirroring/) + is an asynchronous replication of RBD images between multiple Ceph clusters. + This capability is available in two modes: + +* Journal-based: Every write to the RBD image is first recorded + to the associated journal before modifying the actual image. + The remote cluster will read from this associated journal and + replay the updates to its local image. 
+
+* Snapshot-based: This mode uses periodically scheduled or
+  manually created RBD image mirror-snapshots to replicate
+  crash-consistent RBD images between clusters.
+
+> **Note**: This document sheds light on RBD mirroring and how to set it up using Rook.
+> For steps on failover or failback scenarios, refer to the [asynchronous disaster recovery guide](async-disaster-recovery.md).
+
+## Table of Contents
+
+* [Create RBD Pools](#create-rbd-pools)
+* [Bootstrap Peers](#bootstrap-peers)
+* [Configure the RBDMirror Daemon](#configure-the-rbdmirror-daemon)
+* [Add mirroring peer information to RBD pools](#add-mirroring-peer-information-to-rbd-pools)
+* [Enable CSI Replication Sidecars](#enable-csi-replication-sidecars)
+* [Volume Replication Custom Resources](#volume-replication-custom-resources)
+* [Enable mirroring on a PVC](#enable-mirroring-on-a-pvc)
+  * [Creating a VolumeReplicationClass CR](#create-a-volume-replication-class-cr)
+  * [Creating a VolumeReplication CR](#create-a-volumereplication-cr)
+  * [Check VolumeReplication CR status](async-disaster-recovery.md#checking-replication-status)
+* [Backup and Restore](#backup--restore)
+
+## Create RBD Pools
+
+In this section, we create specific RBD pools that have RBD mirroring enabled for use with the DR use case.
+
+Execute the following steps on each peer cluster to create mirror-enabled pools:
+
+* Create an RBD pool that is enabled for mirroring by adding the section
+  `spec.mirroring` to the CephBlockPool CR:
+
+```yaml
+apiVersion: ceph.rook.io/v1
+kind: CephBlockPool
+metadata:
+  name: mirroredpool
+  namespace: rook-ceph
+spec:
+  replicated:
+    size: 1
+  mirroring:
+    enabled: true
+    mode: image
+```
+
+```bash
+kubectl create -f pool-mirrored.yaml
+```
+
+* Repeat the steps on the peer cluster.
+
+> **NOTE:** The pool name must be the same across the cluster peers
+> for RBD replication to function.
+
+See the [CephBlockPool documentation](ceph-pool-crd.md#mirroring) for more details.
+
+> **Note:** It is also possible to edit existing pools and
+> enable them for replication.
+
+## Bootstrap Peers
+
+In order for the rbd-mirror daemon to discover its peer cluster, the
+peer must be registered and a user account must be created.
+
+The following steps enable bootstrapping peers to discover and
+authenticate to each other:
+
+* To bootstrap a peer cluster, its bootstrap secret is required. To determine the name of the secret that contains the bootstrap secret, execute the following command on the remote cluster (cluster-2):
+
+```bash
+[cluster-2]$ kubectl get cephblockpool.ceph.rook.io/mirroredpool -n rook-ceph -ojsonpath='{.status.info.rbdMirrorBootstrapPeerSecretName}'
+```
+
+Here, `pool-peer-token-mirroredpool` is the desired bootstrap secret name.

+
+* The secret `pool-peer-token-mirroredpool` contains all the information related to the token and needs to be injected into the peer. To fetch the decoded secret:
+
+```bash
+[cluster-2]$ kubectl get secret -n rook-ceph pool-peer-token-mirroredpool -o jsonpath='{.data.token}'|base64 -d
+```
+
+> ```bash
+>eyJmc2lkIjoiNGQ1YmNiNDAtNDY3YS00OWVkLThjMGEtOWVhOGJkNDY2OTE3IiwiY2xpZW50X2lkIjoicmJkLW1pcnJvci1wZWVyIiwia2V5IjoiQVFDZ3hmZGdxN013R0JBQWZzcUtCaGpZVjJUZDRxVzJYQm5kemc9PSIsIm1vbl9ob3N0IjoiW3YyOjE5Mi4xNjguMzkuMzY6MzMwMCx2MToxOTIuMTY4LjM5LjM2OjY3ODldIn0=
+> ```
+
+* With this decoded value, create a secret on the primary site (cluster-1):
+
+```bash
+[cluster-1]$ kubectl -n rook-ceph create secret generic rbd-primary-site-secret --from-literal=token=eyJmc2lkIjoiNGQ1YmNiNDAtNDY3YS00OWVkLThjMGEtOWVhOGJkNDY2OTE3IiwiY2xpZW50X2lkIjoicmJkLW1pcnJvci1wZWVyIiwia2V5IjoiQVFDZ3hmZGdxN013R0JBQWZzcUtCaGpZVjJUZDRxVzJYQm5kemc9PSIsIm1vbl9ob3N0IjoiW3YyOjE5Mi4xNjguMzkuMzY6MzMwMCx2MToxOTIuMTY4LjM5LjM2OjY3ODldIn0= --from-literal=pool=mirroredpool
+```
+
+* This completes the bootstrap process for cluster-1 to be peered with cluster-2.
+* Repeat the process with cluster-2 in place of cluster-1 to complete the bootstrap process across both peer clusters.
+
+For more details, refer to the official rbd mirror documentation on
+[how to create a bootstrap peer](https://docs.ceph.com/en/latest/rbd/rbd-mirroring/#bootstrap-peers).
+
+## Configure the RBDMirror Daemon
+
+Replication is handled by the rbd-mirror daemon. The rbd-mirror daemon
+is responsible for pulling image updates from the remote peer cluster
+and applying them to the image within the local cluster.
+
+Creation of the rbd-mirror daemon(s) is done through the custom resource
+definitions (CRDs), as follows:
+
+* Create `mirror.yaml` to deploy the rbd-mirror daemon:
+
+```yaml
+apiVersion: ceph.rook.io/v1
+kind: CephRBDMirror
+metadata:
+  name: my-rbd-mirror
+  namespace: rook-ceph
+spec:
+  # the number of rbd-mirror daemons to deploy
+  count: 1
+```
+
+* Create the RBD mirror daemon:
+
+```bash
+[cluster-1]$ kubectl create -f mirror.yaml -n rook-ceph
+```
+
+* Validate that the `rbd-mirror` daemon pod is up:
+
+```bash
+[cluster-1]$ kubectl get pods -n rook-ceph
+```
+
+> ```bash
+> rook-ceph-rbd-mirror-a-6985b47c8c-dpv4k 1/1 Running 0 10s
+> ```
+
+* Verify that the daemon health is OK:
+
+```bash
+kubectl get cephblockpools.ceph.rook.io mirroredpool -n rook-ceph -o jsonpath='{.status.mirroringStatus.summary}'
+```
+
+> ```bash
+> {"daemon_health":"OK","health":"OK","image_health":"OK","states":{"replaying":1}}
+> ```
+
+* Repeat the above steps on the peer cluster.
+
+See the [CephRBDMirror CRD](ceph-rbd-mirror-crd.md) for more details on the mirroring settings.
+
+## Add mirroring peer information to RBD pools
+
+Each pool can have its own peer. To add the peer information, patch the already created mirroring-enabled pool
+to update the CephBlockPool CR.
+
+```bash
+[cluster-1]$ kubectl -n rook-ceph patch cephblockpool mirroredpool --type merge -p '{"spec":{"mirroring":{"peers": {"secretNames": ["rbd-primary-site-secret"]}}}}'
+```
+
+## Create VolumeReplication CRDs
+
+The Volume Replication Operator follows the controller pattern and provides extended
+APIs for storage disaster recovery. The extended APIs are provided via Custom
+Resource Definitions (CRDs). Create the VolumeReplication CRDs on all the peer clusters.
+ +```bash +$ kubectl create -f https://raw.githubusercontent.com/csi-addons/volume-replication-operator/v0.1.0/config/crd/bases/replication.storage.openshift.io_volumereplications.yaml + +$ kubectl create -f https://raw.githubusercontent.com/csi-addons/volume-replication-operator/v0.1.0/config/crd/bases/replication.storage.openshift.io_volumereplicationclasses.yaml +``` + +## Enable CSI Replication Sidecars + +To achieve RBD Mirroring, `csi-omap-generator` and `volume-replication` + containers need to be deployed in the RBD provisioner pods, which are not enabled by default. + +* **Omap Generator**: Omap generator is a sidecar container that when + deployed with the CSI provisioner pod, generates the internal CSI + omaps between the PV and the RBD image. This is required as static PVs are + transferred across peer clusters in the DR use case, and hence + is needed to preserve PVC to storage mappings. + +* **Volume Replication Operator**: Volume Replication Operator is a + kubernetes operator that provides common and reusable APIs for + storage disaster recovery. + It is based on [csi-addons/spec](https://github.com/csi-addons/spec) + specification and can be used by any storage provider. + For more details, refer to [volume replication operator](https://github.com/csi-addons/volume-replication-operator). + +Execute the following steps on each peer cluster to enable the + OMap generator and Volume Replication sidecars: + +* Edit the `rook-ceph-operator-config` configmap and add the + following configurations + +```bash +kubectl edit cm rook-ceph-operator-config -n rook-ceph +``` + +Add the following properties if not present: + +```yaml +data: + CSI_ENABLE_OMAP_GENERATOR: "true" + CSI_ENABLE_VOLUME_REPLICATION: "true" +``` + +* After updating the configmap with those settings, two new sidecars + should now start automatically in the CSI provisioner pod. +* Repeat the steps on the peer cluster. + +## Volume Replication Custom Resources + +VolumeReplication CRDs provide support for two custom resources: + +* **VolumeReplicationClass**: *VolumeReplicationClass* is a cluster scoped +resource that contains driver related configuration parameters. It holds +the storage admin information required for the volume replication operator. + +* **VolumeReplication**: *VolumeReplication* is a namespaced resource that contains references to storage object to be replicated and VolumeReplicationClass +corresponding to the driver providing replication. + +> For more information, please refer to the +> [volume-replication-operator](https://github.com/csi-addons/volume-replication-operator). + +## Enable mirroring on a PVC + +Below guide assumes that we have a PVC (rbd-pvc) in BOUND state; created using + *StorageClass* with `Retain` reclaimPolicy. + +```bash +[cluster-1]$ kubectl get pvc +``` + +> +> ```bash +> NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +> rbd-pvc Bound pvc-65dc0aac-5e15-4474-90f4-7a3532c621ec 1Gi RWO csi-rbd-sc 44s +> ``` + +### Create a Volume Replication Class CR + +In this case, we create a Volume Replication Class on cluster-1 + +```bash +[cluster-1]$ kubectl apply -f cluster/examples/kubernetes/ceph/volume-replication-class.yaml +``` + +> **Note:** The `schedulingInterval` can be specified in formats of +> minutes, hours or days using suffix `m`,`h` and `d` respectively. +> The optional schedulingStartTime can be specified using the ISO 8601 +> time format. 
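+
+For reference, a minimal sketch of what `volume-replication-class.yaml` typically contains is shown below. The exact API version, provisioner prefix, secret names, and interval are assumptions here and depend on your cluster and operator version; consult the example manifest shipped with Rook for the authoritative values:
+
+```yaml
+apiVersion: replication.storage.openshift.io/v1alpha1
+kind: VolumeReplicationClass
+metadata:
+  name: rbd-volumereplicationclass
+spec:
+  # provisioner is typically <operator-namespace>.rbd.csi.ceph.com (assumed default shown)
+  provisioner: rook-ceph.rbd.csi.ceph.com
+  parameters:
+    mirroringMode: snapshot
+    schedulingInterval: "12m"
+    # secret name/namespace below are the assumed CSI provisioner defaults
+    replication.storage.openshift.io/replication-secret-name: rook-csi-rbd-provisioner
+    replication.storage.openshift.io/replication-secret-namespace: rook-ceph
+```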
+
+### Create a VolumeReplication CR
+
+* Once the VolumeReplicationClass is created, create a VolumeReplication CR for
+  the PVC which we intend to replicate to the secondary cluster.
+
+```bash
+[cluster-1]$ kubectl apply -f cluster/examples/kubernetes/ceph/volume-replication.yaml
+```
+
+> :memo: *VolumeReplication* is a namespace-scoped object. Thus,
+> it should be created in the same namespace as the PVC.
+
+### Checking Replication Status
+
+`replicationState` is the state of the volume being referenced.
+Possible values are primary, secondary, and resync.
+
+* `primary` denotes that the volume is primary.
+* `secondary` denotes that the volume is secondary.
+* `resync` denotes that the volume needs to be resynced.
+
+To check the VolumeReplication CR status:
+
+```bash
+[cluster-1]$ kubectl get volumereplication pvc-volumereplication -oyaml
+```
+
+>```yaml
+>...
+>spec:
+>  dataSource:
+>    apiGroup: ""
+>    kind: PersistentVolumeClaim
+>    name: rbd-pvc
+>  replicationState: primary
+>  volumeReplicationClass: rbd-volumereplicationclass
+>status:
+>  conditions:
+>  - lastTransitionTime: "2021-05-04T07:39:00Z"
+>    message: ""
+>    observedGeneration: 1
+>    reason: Promoted
+>    status: "True"
+>    type: Completed
+>  - lastTransitionTime: "2021-05-04T07:39:00Z"
+>    message: ""
+>    observedGeneration: 1
+>    reason: Healthy
+>    status: "False"
+>    type: Degraded
+>  - lastTransitionTime: "2021-05-04T07:39:00Z"
+>    message: ""
+>    observedGeneration: 1
+>    reason: NotResyncing
+>    status: "False"
+>    type: Resyncing
+>  lastCompletionTime: "2021-05-04T07:39:00Z"
+>  lastStartTime: "2021-05-04T07:38:59Z"
+>  message: volume is marked primary
+>  observedGeneration: 1
+>  state: Primary
+>```
+
+## Backup & Restore
+
+> **NOTE:** To effectively resume operations after a failover/relocation,
+> backups of the Kubernetes artifacts such as the deployment, PVC, and PV need to be created beforehand by the admin, so that the application can be restored on the peer cluster.
+
+Here, we take a backup of the PVC and PV objects on one site so that they can be restored later to the peer cluster.
+
+#### **Take backup on cluster-1**
+
+* Take a backup of the PVC `rbd-pvc`:
+
+```bash
+[cluster-1]$ kubectl get pvc rbd-pvc -oyaml > pvc-backup.yaml
+```
+
+* Take a backup of the PV corresponding to the PVC:
+
+```bash
+[cluster-1]$ kubectl get pv/pvc-65dc0aac-5e15-4474-90f4-7a3532c621ec -oyaml > pv-backup.yaml
+```
+
+> **Note**: We can also take backups using external tools like **Velero**.
+> See the [Velero documentation](https://velero.io/docs/main/) for more information.
+
+#### **Restore the backup on cluster-2**
+
+* Create the storageclass on the secondary cluster:
+
+```bash
+[cluster-2]$ kubectl create -f examples/rbd/storageclass.yaml
+```
+
+* Create the VolumeReplicationClass on the secondary cluster:
+
+```bash
+[cluster-2]$ kubectl apply -f cluster/examples/kubernetes/ceph/volume-replication-class.yaml
+```
+
+> ```bash
+> volumereplicationclass.replication.storage.openshift.io/rbd-volumereplicationclass created
+> ```
+
+* If Persistent Volumes and Claims are created manually on the secondary cluster,
+  remove the `claimRef` on the backed up PV objects in the yaml files, so that the
+  PV can get bound to the new claim on the secondary cluster.
+
+```yaml
+...
+spec:
+  accessModes:
+  - ReadWriteOnce
+  capacity:
+    storage: 1Gi
+  claimRef:
+    apiVersion: v1
+    kind: PersistentVolumeClaim
+    name: rbd-pvc
+    namespace: default
+    resourceVersion: "64252"
+    uid: 65dc0aac-5e15-4474-90f4-7a3532c621ec
+  csi:
+...
+``` + +* Apply the Persistent Volume backup from the primary cluster + +```bash +[cluster-2]$ kubectl create -f pv-backup.yaml +``` + +* Apply the Persistent Volume claim from the restored backup + +```bash +[cluster-2]$ kubectl create -f pvc-backup.yaml +``` + +```bash +[cluster-2]$ kubectl get pvc +``` + +> ```bash +> NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +> rbd-pvc Bound pvc-65dc0aac-5e15-4474-90f4-7a3532c621ec 1Gi RWO rook-ceph-block 44s +> ``` diff --git a/Documentation/tectonic.md b/Documentation/tectonic.md deleted file mode 100644 index 23ec6440dd88..000000000000 --- a/Documentation/tectonic.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: Tectonic Configuration -weight: 11800 -indent: true ---- - -# Tectonic Configuration - -Here is a running guide on how to implement Rook on Tectonic. A complete guide on how to install Tectonic is out of the scope of the Rook project. More info can be found on the [Tectonic website](https://coreos.com/tectonic/docs/latest/) - -## Prerequisites - -* An installed tectonic-installer. These steps are described on [the Tectonic website](https://coreos.com/tectonic/docs/latest/install/bare-metal/#4-tectonic-installer) -* A running matchbox node which will do the provisioning (Matchbox is only required if you are running Tectonic on Bare metal) -* You can run through all steps of the GUI installer, but in the last step, choose `Boot manually`. This way we can make the necessary changes first. - -## Edit the kubelet.service file -We need to make a few adaptions to the Kubelet systemd service file generated by the Tectonic-installer. - -First change to the directory in which you untarred the tectonic installer and find your newly generated cluster configuration files. - -```console -cd ~/tectonic/tectonic-installer/LINUX-OR-DARWIN/clusters -``` - - -Open the file `modules/ignition/resources/services/kubelet.service` in your favorite editor and after the last line containing `ExecStartPre=...`, paste the following extra lines: - -```console -ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins -ExecStartPre=/bin/mkdir -p /var/lib/rook -``` - -And after the `ExecStart=/usr/lib/coreos/kubelet-wrapper \` line, insert the following flag for the kubelet-wrapper to point to a path reachable outside of the Kubelet rkt container: - -```console ---volume-plugin-dir=/var/lib/kubelet/volumeplugins \ -``` - -Save and close the file. - -### Boot your Tectonic cluster - -All the preparations are ready for Tectonic to boot now. We will use `terraform` to start the cluster. -Visit the official [Tectonic manual boot](https://coreos.com/tectonic/docs/latest/install/aws/manual-boot.html#deploy-the-cluster) page for the commands to use. - -**Remark:** The Tectonic installer contains the correct terraform binary out of the box. This terraform binary can be found in following directory `~/tectonic/tectonic-installer/linux`. - -## Start Rook - -After the Tectonic Installer ran and the Kubernetes cluster is started and ready, you can follow the [Rook installation guide](ceph-quickstart.md). 
-If you want to specify which disks Rook uses, follow the instructions in [creating Rook clusters](ceph-cluster-crd.md) diff --git a/Makefile b/Makefile index 04fe898bb617..6ba14c57f39d 100644 --- a/Makefile +++ b/Makefile @@ -109,6 +109,7 @@ build.version: build.common: build.version helm.build mod.check @$(MAKE) go.init @$(MAKE) go.validate + @$(MAKE) -C images/ceph list-image do.build.platform.%: @$(MAKE) PLATFORM=$* go.build @@ -171,7 +172,7 @@ csv-ceph: csv-clean crds ## Generate a CSV file for OLM. $(MAKE) -C images/ceph csv csv-clean: ## Remove existing OLM files. - $(MAKE) -C images/ceph csv-clean + @$(MAKE) -C images/ceph csv-clean crds: $(CONTROLLER_GEN) $(YQ) @echo Updating CRD manifests diff --git a/PendingReleaseNotes.md b/PendingReleaseNotes.md index 95c36d26149a..ec2aabaeb13b 100644 --- a/PendingReleaseNotes.md +++ b/PendingReleaseNotes.md @@ -40,6 +40,8 @@ So the CephCLuster spec field `image` must be updated to point to quay, like `im - Add support for Kubernetes TLS secret for referring TLS certs needed for ceph RGW server. - Stretch clusters are considered stable - Ceph v16.2.5 or greater is required for stretch clusters +- The use of peer secret names in CephRBDMirror is deprecated. Please use CephBlockPool CR to configure peer secret names and import peers. Checkout the `mirroring` section in the CephBlockPool [spec](Documentation/ceph-pool-crd.md#spec) for more details. +- Update Ceph CSI to `v3.4.0` for more details read the [official release note](https://github.com/ceph/ceph-csi/releases/tag/v3.4.0) ### Cassandra diff --git a/build/codegen/codegen.sh b/build/codegen/codegen.sh index 71deb835100c..6c07503f21f7 100755 --- a/build/codegen/codegen.sh +++ b/build/codegen/codegen.sh @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -GROUP_VERSIONS="rook.io:v1alpha2 ceph.rook.io:v1 nfs.rook.io:v1alpha1 cassandra.rook.io:v1alpha1" +GROUP_VERSIONS="rook.io:v1alpha2 ceph.rook.io:v1" scriptdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" diff --git a/build/crds/build-crds.sh b/build/crds/build-crds.sh index 70bdd12d15d0..dcaa12857e25 100755 --- a/build/crds/build-crds.sh +++ b/build/crds/build-crds.sh @@ -17,18 +17,24 @@ set -o errexit set -o pipefail +# set BUILD_CRDS_INTO_DIR to build the CRD results into the given dir instead of in-place +: "${BUILD_CRDS_INTO_DIR:=}" + SCRIPT_ROOT=$( cd "$( dirname "${BASH_SOURCE[0]}" )/../.." 
&& pwd -P) CONTROLLER_GEN_BIN_PATH=$1 YQ_BIN_PATH=$2 : "${MAX_DESC_LEN:=-1}" # allowDangerousTypes is used to accept float64 CRD_OPTIONS="crd:maxDescLen=$MAX_DESC_LEN,trivialVersions=true,generateEmbeddedObjectMeta=true,allowDangerousTypes=true" -OLM_CATALOG_DIR="${SCRIPT_ROOT}/cluster/olm/ceph/deploy/crds" -CEPH_CRDS_FILE_PATH="${SCRIPT_ROOT}/cluster/examples/kubernetes/ceph/crds.yaml" -CEPH_HELM_CRDS_FILE_PATH="${SCRIPT_ROOT}/cluster/charts/rook-ceph/templates/resources.yaml" -CEPH_CRDS_BEFORE_1_16_FILE_PATH="${SCRIPT_ROOT}/cluster/examples/kubernetes/ceph/pre-k8s-1.16/crds.yaml" -CASSANDRA_CRDS_DIR="${SCRIPT_ROOT}/cluster/examples/kubernetes/cassandra" -NFS_CRDS_DIR="${SCRIPT_ROOT}/cluster/examples/kubernetes/nfs" + +DESTINATION_ROOT="$SCRIPT_ROOT" +if [[ -n "$BUILD_CRDS_INTO_DIR" ]]; then + echo "Generating CRDs into dir $BUILD_CRDS_INTO_DIR" + DESTINATION_ROOT="$BUILD_CRDS_INTO_DIR" +fi +OLM_CATALOG_DIR="${DESTINATION_ROOT}/cluster/olm/ceph/deploy/crds" +CEPH_CRDS_FILE_PATH="${DESTINATION_ROOT}/cluster/examples/kubernetes/ceph/crds.yaml" +CEPH_HELM_CRDS_FILE_PATH="${DESTINATION_ROOT}/cluster/charts/rook-ceph/templates/resources.yaml" ############# # FUNCTIONS # @@ -44,19 +50,7 @@ generating_crds_v1() { echo "Generating ceph crds" "$CONTROLLER_GEN_BIN_PATH" "$CRD_OPTIONS" paths="./pkg/apis/ceph.rook.io/v1" output:crd:artifacts:config="$OLM_CATALOG_DIR" # the csv upgrade is failing on the volumeClaimTemplate.metadata.annotations.crushDeviceClass unless we preserve the annotations as an unknown field - $YQ_BIN_PATH w -i cluster/olm/ceph/deploy/crds/ceph.rook.io_cephclusters.yaml spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.storage.properties.storageClassDeviceSets.items.properties.volumeClaimTemplates.items.properties.metadata.properties.annotations.x-kubernetes-preserve-unknown-fields true - - echo "Generating cassandra crds" - "$CONTROLLER_GEN_BIN_PATH" "$CRD_OPTIONS" paths="./pkg/apis/cassandra.rook.io/v1alpha1" output:crd:artifacts:config="$CASSANDRA_CRDS_DIR" - # Format with yq for consistent whitespace - $YQ_BIN_PATH read $CASSANDRA_CRDS_DIR/cassandra.rook.io_clusters.yaml > $CASSANDRA_CRDS_DIR/crds.yaml - rm -f $CASSANDRA_CRDS_DIR/cassandra.rook.io_clusters.yaml - - echo "Generating nfs crds" - "$CONTROLLER_GEN_BIN_PATH" "$CRD_OPTIONS" paths="./pkg/apis/nfs.rook.io/v1alpha1" output:crd:artifacts:config="$NFS_CRDS_DIR" - # Format with yq for consistent whitespace - $YQ_BIN_PATH read $NFS_CRDS_DIR/nfs.rook.io_nfsservers.yaml > $NFS_CRDS_DIR/crds.yaml - rm -f $NFS_CRDS_DIR/nfs.rook.io_nfsservers.yaml + $YQ_BIN_PATH w -i "${OLM_CATALOG_DIR}"/ceph.rook.io_cephclusters.yaml spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.storage.properties.storageClassDeviceSets.items.properties.volumeClaimTemplates.items.properties.metadata.properties.annotations.x-kubernetes-preserve-unknown-fields true } generating_crds_v1alpha2() { @@ -68,11 +62,6 @@ generating_crds_v1alpha2() { # "$CONTROLLER_GEN_BIN_PATH" "$CRD_OPTIONS" paths="./vendor/github.com/kube-object-storage/lib-bucket-provisioner/pkg/apis/objectbucket.io/v1alpha1" output:crd:artifacts:config="$OLM_CATALOG_DIR" } -generate_vol_rep_crds() { - echo "Generating volume replication crds in crds.yaml" - "$CONTROLLER_GEN_BIN_PATH" "$CRD_OPTIONS" paths="github.com/csi-addons/volume-replication-operator/api/v1alpha1" output:crd:artifacts:config="$OLM_CATALOG_DIR" -} - generating_main_crd() { true > "$CEPH_CRDS_FILE_PATH" true > "$CEPH_HELM_CRDS_FILE_PATH" @@ -89,7 +78,7 @@ build_helm_resources() { { # add 
header echo "{{- if .Values.crds.enabled }}" - echo "{{- if semverCompare \">=1.16.0\" .Capabilities.KubeVersion.GitVersion }}" + echo "{{- if semverCompare \">=1.16.0-0\" .Capabilities.KubeVersion.GitVersion }}" # Add helm annotations to all CRDS and skip the first 4 lines of crds.yaml "$YQ_BIN_PATH" w -d'*' "$CEPH_CRDS_FILE_PATH" "metadata.annotations[helm.sh/resource-policy]" keep | tail -n +5 @@ -98,7 +87,7 @@ build_helm_resources() { echo "{{- else }}" # add footer - cat "$CEPH_CRDS_BEFORE_1_16_FILE_PATH" + cat "${SCRIPT_ROOT}/cluster/examples/kubernetes/ceph/pre-k8s-1.16/crds.yaml" # DO NOT REMOVE the empty line, it is necessary echo "" echo "{{- end }}" @@ -117,8 +106,6 @@ if [ -z "$NO_OB_OBC_VOL_GEN" ]; then generating_crds_v1alpha2 fi -generate_vol_rep_crds - generating_main_crd for crd in "$OLM_CATALOG_DIR/"*.yaml; do diff --git a/build/crds/crds.go b/build/crds/crds.go deleted file mode 100644 index 1bcaee88b3be..000000000000 --- a/build/crds/crds.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build crds - -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package imports the required code to build the volume replication CRDs with 'make crds' -package crds - -import _ "github.com/csi-addons/volume-replication-operator" diff --git a/build/makelib/common.mk b/build/makelib/common.mk index 26484e65939e..66387c98920f 100644 --- a/build/makelib/common.mk +++ b/build/makelib/common.mk @@ -64,10 +64,6 @@ CXX := $(CROSS_TRIPLE)-g++ export CC CXX endif -# sed -i'' -e works on both UNIX (MacOS) and GNU (Linux) versions of sed -SED_CMD ?= sed -i'' -e -export SED_CMD - # set the version number. you should not need to do this # for the majority of scenarios. ifeq ($(origin VERSION), undefined) @@ -106,12 +102,15 @@ ifeq ($(BUILD_REGISTRY),build-) $(error Failed to get unique ID for host+dir. Check that '$(SHA256CMD)' functions or override SHA256CMD) endif +SED_IN_PLACE = $(ROOT_DIR)/build/sed-in-place +export SED_IN_PLACE + # This is a neat little target that prints any variable value from the Makefile # Usage: make echo.IMAGES echo.PLATFORM echo.%: ; @echo $* = $($*) # Select which images (backends) to make; default to all possible images -IMAGES ?= ceph nfs cassandra +IMAGES ?= ceph COMMA := , SPACE := diff --git a/build/makelib/golang.mk b/build/makelib/golang.mk index dfb91260eb77..38296d70c107 100644 --- a/build/makelib/golang.mk +++ b/build/makelib/golang.mk @@ -72,9 +72,7 @@ GOHOST := GOOS=$(GOHOSTOS) GOARCH=$(GOHOSTARCH) go GO_VERSION := $(shell $(GO) version | sed -ne 's/[^0-9]*\(\([0-9]\.\)\{0,4\}[0-9][^.]\).*/\1/p') GO_FULL_VERSION := $(shell $(GO) version) -# we use a consistent version of gofmt even while running different go compilers. 
-# see https://github.com/golang/go/issues/26397 for more details -GOFMT_VERSION := 1.11 +GOFMT_VERSION := $(GO_VERSION) ifneq ($(findstring $(GOFMT_VERSION),$(GO_VERSION)),) GOFMT := $(shell which gofmt) else @@ -119,6 +117,7 @@ go.init: .PHONY: go.build go.build: @echo === go build $(PLATFORM) + $(info Go version: $(shell $(GO) version)) $(foreach p,$(GO_STATIC_PACKAGES),@CGO_ENABLED=0 $(GO) build -v -o $(GO_OUT_DIR)/$(lastword $(subst /, ,$(p)))$(GO_OUT_EXT) $(GO_STATIC_FLAGS) $(p)${\n}) $(foreach p,$(GO_TEST_PACKAGES),@CGO_ENABLED=0 $(GO) test -v -c -o $(GO_TEST_OUTPUT)/$(lastword $(subst /, ,$(p)))$(GO_OUT_EXT) $(GO_STATIC_FLAGS) $(p)${\n}) diff --git a/build/makelib/helm.mk b/build/makelib/helm.mk index 79da19741cda..ed5720de4ba0 100644 --- a/build/makelib/helm.mk +++ b/build/makelib/helm.mk @@ -39,9 +39,9 @@ define helm.chart $(HELM_OUTPUT_DIR)/$(1)-$(VERSION).tgz: $(HELM) $(HELM_OUTPUT_DIR) $(shell find $(HELM_CHARTS_DIR)/$(1) -type f) @echo === helm package $(1) @cp -r $(HELM_CHARTS_DIR)/$(1) $(OUTPUT_DIR) - @$(SED_CMD) 's|VERSION|$(VERSION)|g' $(OUTPUT_DIR)/$(1)/values.yaml + @$(SED_IN_PLACE) 's|VERSION|$(VERSION)|g' $(OUTPUT_DIR)/$(1)/values.yaml @$(HELM) lint $(abspath $(OUTPUT_DIR)/$(1)) --set image.tag=$(VERSION) - @$(HELM) package --version $(VERSION) -d $(HELM_OUTPUT_DIR) $(abspath $(OUTPUT_DIR)/$(1)) + @$(HELM) package --version $(VERSION) --app-version $(VERSION) -d $(HELM_OUTPUT_DIR) $(abspath $(OUTPUT_DIR)/$(1)) $(HELM_INDEX): $(HELM_OUTPUT_DIR)/$(1)-$(VERSION).tgz endef $(foreach p,$(HELM_CHARTS),$(eval $(call helm.chart,$(p)))) diff --git a/build/sed-in-place b/build/sed-in-place new file mode 100755 index 000000000000..596b1563a209 --- /dev/null +++ b/build/sed-in-place @@ -0,0 +1,26 @@ +#!/usr/bin/env bash +set -eEuo pipefail + +# sed is NOT portable across OSes + +# sed -i '' does in-place on Mac, BSD, and other POSIX-compliant OSes +# sed -i '' does not work with GNU sed, but sed -i (without small quotes) does + +# assume that sed is not GNU sed initially +SED=(sed -i '') + +if sed --help &>/dev/null; then + # if sed doesn't have help text, it isn't GNU sed + if [[ $(sed --help 2>&1) == *GNU* ]]; then + SED=(sed -i) + fi +fi + +# sed -e is required on Mac/BSD if the -i option is used +# sed -e is not required but is supported by GNU sed +# Therefore, this script supplies -e it unless the first argument to this script is a flag +if [[ $1 != -* ]]; then + SED+=(-e) +fi + +"${SED[@]}" "${@}" diff --git a/cluster/charts/rook-ceph-cluster/Chart.yaml b/cluster/charts/rook-ceph-cluster/Chart.yaml index cc2b48f9bb35..b282613cdb68 100644 --- a/cluster/charts/rook-ceph-cluster/Chart.yaml +++ b/cluster/charts/rook-ceph-cluster/Chart.yaml @@ -2,6 +2,7 @@ apiVersion: v2 description: Manages a single Ceph cluster namespace for Rook name: rook-ceph-cluster version: 0.0.1 +appVersion: 0.0.1 icon: https://rook.io/images/rook-logo.svg sources: - https://github.com/rook/rook diff --git a/cluster/charts/rook-ceph-cluster/templates/_helpers.tpl b/cluster/charts/rook-ceph-cluster/templates/_helpers.tpl index 529b4901755e..8a7cf525d6a0 100644 --- a/cluster/charts/rook-ceph-cluster/templates/_helpers.tpl +++ b/cluster/charts/rook-ceph-cluster/templates/_helpers.tpl @@ -24,3 +24,10 @@ imagePullSecrets: {{ toYaml .Values.imagePullSecrets }} {{- end -}} {{- end -}} + +{{/* +Define the clusterName as defaulting to the release namespace +*/}} +{{- define "clusterName" -}} +{{ .Values.clusterName | default .Release.Namespace }} +{{- end -}} diff --git 
a/cluster/charts/rook-ceph-cluster/templates/cephblockpool.yaml b/cluster/charts/rook-ceph-cluster/templates/cephblockpool.yaml new file mode 100644 index 000000000000..41856f5a5287 --- /dev/null +++ b/cluster/charts/rook-ceph-cluster/templates/cephblockpool.yaml @@ -0,0 +1,26 @@ +{{- $root := . -}} +{{- range $blockpool := .Values.cephBlockPools -}} +--- +apiVersion: ceph.rook.io/v1 +kind: CephBlockPool +metadata: + name: {{ $blockpool.name }} +spec: +{{ toYaml $blockpool.spec | indent 2 }} +--- +{{- if default false $blockpool.storageClass.enabled }} +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: {{ $blockpool.storageClass.name }} + annotations: + storageclass.kubernetes.io/is-default-class: "{{ if default false $blockpool.storageClass.isDefault }}true{{ else }}false{{ end }}" +provisioner: {{ $root.Values.operatorNamespace }}.rbd.csi.ceph.com +parameters: + pool: {{ $blockpool.name }} + clusterID: {{ $root.Release.Namespace }} +{{ toYaml $blockpool.storageClass.parameters | indent 2 }} +reclaimPolicy: {{ default "Delete" $blockpool.storageClass.reclaimPolicy }} +allowVolumeExpansion: {{ default "true" $blockpool.storageClass.allowVolumeExpansion }} +{{ end }} +{{ end }} diff --git a/cluster/charts/rook-ceph-cluster/templates/cephfilesystem.yaml b/cluster/charts/rook-ceph-cluster/templates/cephfilesystem.yaml new file mode 100644 index 000000000000..5c5646bef13a --- /dev/null +++ b/cluster/charts/rook-ceph-cluster/templates/cephfilesystem.yaml @@ -0,0 +1,27 @@ +{{- $root := . -}} +{{- range $filesystem := .Values.cephFileSystems -}} +--- +apiVersion: ceph.rook.io/v1 +kind: CephFilesystem +metadata: + name: {{ $filesystem.name }} +spec: +{{ toYaml $filesystem.spec | indent 2 }} +--- +{{- if default false $filesystem.storageClass.enabled }} +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: {{ $filesystem.storageClass.name }} + annotations: + storageclass.kubernetes.io/is-default-class: "{{ if default false $filesystem.storageClass.isDefault }}true{{ else }}false{{ end }}" +provisioner: {{ $root.Values.operatorNamespace }}.cephfs.csi.ceph.com +parameters: + fsName: {{ $filesystem.name }} + pool: {{ $filesystem.name }}-data0 + clusterID: {{ $root.Release.Namespace }} +{{ toYaml $filesystem.storageClass.parameters | indent 2 }} +reclaimPolicy: {{ default "Delete" $filesystem.storageClass.reclaimPolicy }} +allowVolumeExpansion: {{ default "true" $filesystem.storageClass.allowVolumeExpansion }} +{{ end }} +{{ end }} diff --git a/cluster/charts/rook-ceph-cluster/templates/cephobjectstore.yaml b/cluster/charts/rook-ceph-cluster/templates/cephobjectstore.yaml new file mode 100644 index 000000000000..21177f32b067 --- /dev/null +++ b/cluster/charts/rook-ceph-cluster/templates/cephobjectstore.yaml @@ -0,0 +1,23 @@ +{{- $root := . 
-}} +{{- range $objectstore := .Values.cephObjectStores -}} +--- +apiVersion: ceph.rook.io/v1 +kind: CephObjectStore +metadata: + name: {{ $objectstore.name }} +spec: +{{ toYaml $objectstore.spec | indent 2 }} +--- +{{- if default false $objectstore.storageClass.enabled }} +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: {{ $objectstore.storageClass.name }} +provisioner: {{ $root.Release.Namespace }}.ceph.rook.io/bucket +reclaimPolicy: {{ default "Delete" $objectstore.storageClass.reclaimPolicy }} +parameters: + objectStoreName: {{ $objectstore.name }} + objectStoreNamespace: {{ $root.Release.Namespace }} +{{ toYaml $objectstore.storageClass.parameters | indent 2 }} +{{ end }} +{{ end }} diff --git a/cluster/charts/rook-ceph-cluster/templates/configmap.yaml b/cluster/charts/rook-ceph-cluster/templates/configmap.yaml index 65f987979d65..3586ed856f61 100644 --- a/cluster/charts/rook-ceph-cluster/templates/configmap.yaml +++ b/cluster/charts/rook-ceph-cluster/templates/configmap.yaml @@ -1,4 +1,5 @@ {{- if .Values.configOverride }} +--- kind: ConfigMap apiVersion: v1 metadata: diff --git a/cluster/charts/rook-ceph-cluster/templates/deployment.yaml b/cluster/charts/rook-ceph-cluster/templates/deployment.yaml index ec7fa9f2b2be..7e99948b293d 100644 --- a/cluster/charts/rook-ceph-cluster/templates/deployment.yaml +++ b/cluster/charts/rook-ceph-cluster/templates/deployment.yaml @@ -1,4 +1,5 @@ {{- if .Values.toolbox.enabled }} +--- apiVersion: apps/v1 kind: Deployment metadata: diff --git a/cluster/charts/rook-ceph-cluster/templates/ingress.yaml b/cluster/charts/rook-ceph-cluster/templates/ingress.yaml new file mode 100644 index 000000000000..d665de5f28fd --- /dev/null +++ b/cluster/charts/rook-ceph-cluster/templates/ingress.yaml @@ -0,0 +1,44 @@ +{{- if .Values.ingress.dashboard.host }} +--- +{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1" }} +apiVersion: networking.k8s.io/v1 +{{ else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} +apiVersion: networking.k8s.io/v1beta1 +{{ else }} +apiVersion: extensions/v1beta1 +{{ end -}} +kind: Ingress +metadata: + name: {{ template "clusterName" . 
}}-dashboard + {{- if .Values.ingress.dashboard.annotations }} + annotations: {{- toYaml .Values.ingress.dashboard.annotations | nindent 4 }} + {{- end }} +spec: + rules: + - host: {{ .Values.ingress.dashboard.host.name }} + http: + paths: + - path: {{ .Values.ingress.dashboard.host.path | default "/" }} + backend: +{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1" }} + service: + name: rook-ceph-mgr-dashboard + port: + {{- if .Values.cephClusterSpec.dashboard.ssl }} + name: https-dashboard + {{- else }} + name: http-dashboard + {{- end }} + pathType: Prefix +{{- else }} + serviceName: rook-ceph-mgr-dashboard + {{- if .Values.cephClusterSpec.dashboard.ssl }} + servicePort: https-dashboard + {{- else }} + servicePort: http-dashboard + {{- end }} +{{- end }} + {{- if .Values.ingress.dashboard.tls }} + tls: {{- toYaml .Values.ingress.dashboard.tls | nindent 4 }} + {{- end }} +{{- end }} diff --git a/cluster/charts/rook-ceph-cluster/templates/role.yaml b/cluster/charts/rook-ceph-cluster/templates/role.yaml index 36719cab5c88..af88cd8fb8ff 100644 --- a/cluster/charts/rook-ceph-cluster/templates/role.yaml +++ b/cluster/charts/rook-ceph-cluster/templates/role.yaml @@ -82,7 +82,7 @@ rules: verbs: ["get", "list", "delete" ] - apiGroups: [""] resources: ["persistentvolumeclaims"] - verbs: ["delete"] + verbs: ["get", "update", "delete"] {{- if .Values.monitoring.enabled }} --- diff --git a/cluster/charts/rook-ceph-cluster/values.yaml b/cluster/charts/rook-ceph-cluster/values.yaml index 452900aa1e66..5a878fb941b3 100644 --- a/cluster/charts/rook-ceph-cluster/values.yaml +++ b/cluster/charts/rook-ceph-cluster/values.yaml @@ -6,15 +6,14 @@ operatorNamespace: rook-ceph # The metadata.name of the CephCluster CR. The default name is the same as the namespace. -#clusterName: rook-ceph +# clusterName: rook-ceph # Ability to override ceph.conf -#configOverride: | -# [global] -# mon_allow_pool_delete = true -# -# osd_pool_default_size = 3 -# osd_pool_default_min_size = 2 +# configOverride: | +# [global] +# mon_allow_pool_delete = true +# osd_pool_default_size = 3 +# osd_pool_default_min_size = 2 # Installs a debugging toolbox deployment toolbox: @@ -38,12 +37,12 @@ monitoring: cephClusterSpec: cephVersion: # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw). - # v13 is mimic, v14 is nautilus, and v15 is octopus. + # v14 is nautilus, v15 is octopus, and v16 is pacific. # RECOMMENDATION: In production, use a specific version tag instead of the general v14 flag, which pulls the latest release and could result in different # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/. # If you want to be more precise, you can always use a timestamp tag such quay.io/ceph/ceph:v15.2.11-20200419 # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities - image: quay.io/ceph/ceph:v16.2.5 + image: quay.io/ceph/ceph:v16.2.6 # Whether to allow unsupported versions of Ceph. Currently `nautilus` and `octopus` are supported. # Future versions such as `pacific` would require this to be set to `true`. # Do not set to true in production. @@ -53,25 +52,31 @@ cephClusterSpec: # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster. # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment. 
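The ingress template above selects the networking API version from the cluster's capabilities and routes traffic to the rook-ceph-mgr-dashboard service on either the http-dashboard or https-dashboard port. A minimal sketch of wiring it up at install time; the chart repository URL, hostname, and annotation values are illustrative assumptions, not values taken from this change:

# Assumes the published Rook chart repo; host and annotation values are placeholders.
helm repo add rook-release https://charts.rook.io/release
cat > dashboard-ingress.yaml <<'EOF'
ingress:
  dashboard:
    annotations:
      kubernetes.io/ingress.class: nginx
    host:
      name: dashboard.example.com
      path: "/"
EOF
helm upgrade --install rook-ceph-cluster rook-release/rook-ceph-cluster \
  --create-namespace --namespace rook-ceph \
  --set operatorNamespace=rook-ceph \
  -f dashboard-ingress.yaml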
dataDirHostPath: /var/lib/rook + # Whether or not upgrade should continue even if a check fails # This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise # Use at your OWN risk # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/master/ceph-upgrade.html#ceph-version-upgrades skipUpgradeChecks: false + # Whether or not continue if PGs are not clean during an upgrade continueUpgradeAfterChecksEvenIfNotHealthy: false + # WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator would wait before an OSD can be stopped for upgrade or restart. # If the timeout exceeds and OSD is not ok to stop, then the operator would skip upgrade for the current OSD and proceed with the next one # if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then opertor would # continue with the upgrade of an OSD even if its not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`. # The default wait timeout is 10 minutes. waitTimeoutForHealthyOSDInMinutes: 10 + mon: - # Set the number of mons to be started. Must be an odd number, and is generally recommended to be 3. + # Set the number of mons to be started. Generally recommended to be 3. + # For highest availability, an odd number of mons should be specified. count: 3 # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason. # Mons should only be allowed on the same node for test environments where data loss is acceptable. allowMultiplePerNode: false + mgr: # When higher availability of the mgr is needed, increase the count to 2. # In that case, one mgr will be active and one in standby. When Ceph updates which @@ -82,6 +87,7 @@ cephClusterSpec: # are already enabled by other settings in the cluster CR. - name: pg_autoscaler enabled: true + # enable the ceph dashboard for viewing cluster status dashboard: enabled: true @@ -91,35 +97,40 @@ cephClusterSpec: # port: 8443 # serve the dashboard using SSL ssl: true - #network: - # enable host networking - #provider: host - # EXPERIMENTAL: enable the Multus network provider - #provider: multus - #selectors: - # The selector keys are required to be `public` and `cluster`. - # Based on the configuration, the operator will do the following: - # 1. if only the `public` selector key is specified both public_network and cluster_network Ceph settings will listen on that interface - # 2. if both `public` and `cluster` selector keys are specified the first one will point to 'public_network' flag and the second one to 'cluster_network' - # - # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus - # - #public: public-conf --> NetworkAttachmentDefinition object name in Multus - #cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus - # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4 - #ipFamily: "IPv6" - # Ceph daemons to listen on both IPv4 and Ipv6 networks - #dualStack: false + + # Network configuration, see: https://github.com/rook/rook/blob/master/Documentation/ceph-cluster-crd.md#network-configuration-settings + # network: + # # enable host networking + # provider: host + # # EXPERIMENTAL: enable the Multus network provider + # provider: multus + # selectors: + # # The selector keys are required to be `public` and `cluster`. 
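The cephClusterSpec block above mirrors the CephCluster CRD fields, so individual settings can be overridden per release without copying the whole spec. A small sketch, assuming the chart comes from the repo added in the earlier sketch; the file name and chosen values are illustrative:

cat > ceph-cluster-overrides.yaml <<'EOF'
# Only the keys being changed need to be listed; everything else keeps the chart defaults above.
cephClusterSpec:
  cephVersion:
    image: quay.io/ceph/ceph:v16.2.6   # pin a specific Ceph release, as the comments recommend
  mon:
    count: 3                           # an odd count gives the best availability
    allowMultiplePerNode: false
  dashboard:
    enabled: true
    ssl: true
EOF
helm upgrade --install rook-ceph-cluster rook-release/rook-ceph-cluster \
  --namespace rook-ceph -f ceph-cluster-overrides.yaml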
+ # # Based on the configuration, the operator will do the following: + # # 1. if only the `public` selector key is specified both public_network and cluster_network Ceph settings will listen on that interface + # # 2. if both `public` and `cluster` selector keys are specified the first one will point to 'public_network' flag and the second one to 'cluster_network' + # # + # # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus + # # + # # public: public-conf --> NetworkAttachmentDefinition object name in Multus + # # cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus + # # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4 + # ipFamily: "IPv6" + # # Ceph daemons to listen on both IPv4 and Ipv6 networks + # dualStack: false + # enable the crash collector for ceph daemon crash collection crashCollector: disable: false # Uncomment daysToRetain to prune ceph crash entries older than the # specified number of days. - #daysToRetain: 30 + # daysToRetain: 30 + # enable log collector, daemons will log on files and rotate # logCollector: # enabled: true # periodicity: 24h # SUFFIX may be 'h' for hours or 'd' for days. + # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction. cleanupPolicy: # Since cluster cleanup is destructive to data, confirmation is required. @@ -144,100 +155,109 @@ cephClusterSpec: # allowUninstallWithVolumes defines how the uninstall should be performed # If set to true, cephCluster deletion does not wait for the PVs to be deleted. allowUninstallWithVolumes: false + # To control where various services will be scheduled by kubernetes, use the placement configuration sections below. # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and # tolerate taints with a key of 'storage-node'. - # placement: - # all: - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: role - # operator: In - # values: - # - storage-node - # podAffinity: - # podAntiAffinity: - # topologySpreadConstraints: - # tolerations: - # - key: storage-node - # operator: Exists - # The above placement information can also be specified for mon, osd, and mgr components - # mon: - # Monitor deployments may contain an anti-affinity rule for avoiding monitor - # collocation on the same node. This is a required rule when host network is used - # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a - # preferred rule with weight: 50. - # osd: - # mgr: - # cleanup: - #annotations: - # all: - # mon: - # osd: - # cleanup: - # prepareosd: - # If no mgr annotations are set, prometheus scrape annotations will be set by default. - # mgr: - #labels: - # all: - # mon: - # osd: - # cleanup: - # mgr: - # prepareosd: - # monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by operator. 
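Following the commented network section above, host networking (or Multus) is enabled by uncommenting the provider in an override file rather than editing the chart. A sketch assuming host networking is wanted on a fresh install; the file name is a placeholder:

cat > ceph-network-overrides.yaml <<'EOF'
cephClusterSpec:
  network:
    provider: host   # Ceph daemons bind to the node's network instead of the pod network
EOF
helm upgrade --install rook-ceph-cluster rook-release/rook-ceph-cluster \
  --namespace rook-ceph -f ceph-network-overrides.yaml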
- # These labels can be passed as LabelSelector to Prometheus - # monitoring: - #resources: - # The requests and limits set here, allow the mgr pod to use half of one CPU core and 1 gigabyte of memory - # mgr: - # limits: - # cpu: "500m" - # memory: "1024Mi" - # requests: - # cpu: "500m" - # memory: "1024Mi" - # The above example requests/limits can also be added to the other components - # mon: - # osd: - # prepareosd: - # mgr-sidecar: - # crashcollector: - # logcollector: - # cleanup: + # placement: + # all: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: role + # operator: In + # values: + # - storage-node + # podAffinity: + # podAntiAffinity: + # topologySpreadConstraints: + # tolerations: + # - key: storage-node + # operator: Exists + # # The above placement information can also be specified for mon, osd, and mgr components + # mon: + # # Monitor deployments may contain an anti-affinity rule for avoiding monitor + # # collocation on the same node. This is a required rule when host network is used + # # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a + # # preferred rule with weight: 50. + # osd: + # mgr: + # cleanup: + + # annotations: + # all: + # mon: + # osd: + # cleanup: + # prepareosd: + # # If no mgr annotations are set, prometheus scrape annotations will be set by default. + # mgr: + + # labels: + # all: + # mon: + # osd: + # cleanup: + # mgr: + # prepareosd: + # # monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by operator. + # # These labels can be passed as LabelSelector to Prometheus + # monitoring: + + # resources: + # # The requests and limits set here, allow the mgr pod to use half of one CPU core and 1 gigabyte of memory + # mgr: + # limits: + # cpu: "500m" + # memory: "1024Mi" + # requests: + # cpu: "500m" + # memory: "1024Mi" + # # The above example requests/limits can also be added to the other components + # mon: + # osd: + # prepareosd: + # mgr-sidecar: + # crashcollector: + # logcollector: + # cleanup: + # The option to automatically remove OSDs that are out and are safe to destroy. removeOSDsIfOutAndSafeToRemove: false - # priorityClassNames: - # all: rook-ceph-default-priority-class - # mon: rook-ceph-mon-priority-class - # osd: rook-ceph-osd-priority-class - # mgr: rook-ceph-mgr-priority-class + + # priority classes to apply to ceph resources + # priorityClassNames: + # all: rook-ceph-default-priority-class + # mon: rook-ceph-mon-priority-class + # osd: rook-ceph-osd-priority-class + # mgr: rook-ceph-mgr-priority-class + storage: # cluster level storage configuration and selection useAllNodes: true useAllDevices: true - #deviceFilter: - #config: - # crushRoot: "custom-root" # specify a non-default root label for the CRUSH map - # metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore. - # databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB - # journalSizeMB: "1024" # uncomment if the disks are 20 GB or smaller - # osdsPerDevice: "1" # this value can be overridden at the node or device level - # encryptedDevice: "true" # the default value for this option is "false" - # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named - # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label. 
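The commented placement block above constrains where Ceph daemons may schedule. A minimal sketch of pinning all daemons to nodes labeled role=storage-node and tolerating a matching taint, lifted directly from the commented example; the label and taint key are only the example values used in those comments:

cat > ceph-placement-overrides.yaml <<'EOF'
cephClusterSpec:
  placement:
    all:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: role
                  operator: In
                  values:
                    - storage-node
      tolerations:
        - key: storage-node
          operator: Exists
EOF
helm upgrade --install rook-ceph-cluster rook-release/rook-ceph-cluster \
  --namespace rook-ceph -f ceph-placement-overrides.yaml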
- # nodes: - # - name: "172.17.4.201" - # devices: # specific devices to use for storage can be specified for each node - # - name: "sdb" - # - name: "nvme01" # multiple osds can be created on high performance devices - # config: - # osdsPerDevice: "5" - # - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths - # config: # configuration can be specified at the node level which overrides the cluster level config - # - name: "172.17.4.301" - # deviceFilter: "^sd." + # deviceFilter: + # config: + # crushRoot: "custom-root" # specify a non-default root label for the CRUSH map + # metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore. + # databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB + # journalSizeMB: "1024" # uncomment if the disks are 20 GB or smaller + # osdsPerDevice: "1" # this value can be overridden at the node or device level + # encryptedDevice: "true" # the default value for this option is "false" + # # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named + # # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label. + # nodes: + # - name: "172.17.4.201" + # devices: # specific devices to use for storage can be specified for each node + # - name: "sdb" + # - name: "nvme01" # multiple osds can be created on high performance devices + # config: + # osdsPerDevice: "5" + # - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths + # config: # configuration can be specified at the node level which overrides the cluster level config + # - name: "172.17.4.301" + # deviceFilter: "^sd." + # The section for configuring management of daemon disruptions during upgrade or fencing. disruptionManagement: # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically @@ -257,7 +277,7 @@ cephClusterSpec: # Namespace in which to watch for the MachineDisruptionBudgets. machineDisruptionBudgetNamespace: openshift-machine-api - # healthChecks + # Configure the healthcheck and liveness probes for ceph pods. # Valid values for daemons are 'mon', 'osd', 'status' healthCheck: daemonHealth: @@ -270,7 +290,7 @@ cephClusterSpec: status: disabled: false interval: 60s - # Change pod liveness probe, it works for all mon,mgr,osd daemons + # Change pod liveness probe, it works for all mon, mgr, and osd pods. livenessProbe: mon: disabled: false @@ -278,3 +298,124 @@ cephClusterSpec: disabled: false osd: disabled: false + +ingress: + dashboard: {} + # annotations: + # kubernetes.io/ingress.class: nginx + # external-dns.alpha.kubernetes.io/hostname: example.com + # nginx.ingress.kubernetes.io/rewrite-target: /ceph-dashboard/$2 + # host: + # name: example.com + # path: "/ceph-dashboard(/|$)(.*)" + # tls: + +cephBlockPools: + - name: ceph-blockpool + # see https://github.com/rook/rook/blob/master/Documentation/ceph-pool-crd.md#spec for available configuration + spec: + failureDomain: host + replicated: + size: 3 + storageClass: + enabled: true + name: ceph-block + isDefault: true + reclaimPolicy: Delete + allowVolumeExpansion: true + # see https://github.com/rook/rook/blob/master/Documentation/ceph-block.md#provision-storage for available configuration + parameters: + # (optional) mapOptions is a comma-separated list of map options. 
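As the storage comments above note, listing individual nodes requires useAllNodes: false, and each node name must match its kubernetes.io/hostname label. A sketch built from the commented example; the node and device names are placeholders:

cat > ceph-storage-overrides.yaml <<'EOF'
cephClusterSpec:
  storage:
    useAllNodes: false
    useAllDevices: false
    nodes:
      - name: "172.17.4.201"           # must equal the node's kubernetes.io/hostname label
        devices:
          - name: "sdb"
          - name: "nvme01"
            config:
              osdsPerDevice: "5"       # multiple OSDs on a fast device
      - name: "172.17.4.301"
        deviceFilter: "^sd."
EOF
helm upgrade --install rook-ceph-cluster rook-release/rook-ceph-cluster \
  --namespace rook-ceph -f ceph-storage-overrides.yaml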
+ # For krbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options + # For nbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options + # mapOptions: lock_on_read,queue_depth=1024 + + # (optional) unmapOptions is a comma-separated list of unmap options. + # For krbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options + # For nbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options + # unmapOptions: force + + # RBD image format. Defaults to "2". + imageFormat: "2" + # RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature. + imageFeatures: layering + # The secrets contain Ceph admin credentials. + csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner + csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph + csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner + csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph + csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node + csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph + # Specify the filesystem type of the volume. If not specified, csi-provisioner + # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock + # in hyperconverged settings where the volume is mounted on the same node as the osds. + csi.storage.k8s.io/fstype: ext4 + +cephFileSystems: + - name: ceph-filesystem + # see https://github.com/rook/rook/blob/master/Documentation/ceph-filesystem-crd.md#filesystem-settings for available configuration + spec: + metadataPool: + replicated: + size: 3 + dataPools: + - failureDomain: host + replicated: + size: 3 + metadataServer: + activeCount: 1 + activeStandby: true + storageClass: + enabled: true + isDefault: false + name: ceph-filesystem + reclaimPolicy: Delete + allowVolumeExpansion: true + # see https://github.com/rook/rook/blob/master/Documentation/ceph-filesystem.md#provision-storage for available configuration + parameters: + # The secrets contain Ceph admin credentials. + csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner + csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph + csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner + csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph + csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node + csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph + # Specify the filesystem type of the volume. If not specified, csi-provisioner + # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock + # in hyperconverged settings where the volume is mounted on the same node as the osds. 
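With cephBlockPools above creating a ceph-blockpool pool and a default ceph-block StorageClass, ordinary PVCs can consume RBD volumes. A sketch of a claim against that class; the claim name and size are arbitrary:

cat <<'EOF' | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rbd-example              # hypothetical claim name
spec:
  accessModes:
    - ReadWriteOnce              # RBD block volumes are single-writer
  storageClassName: ceph-block   # the class defined in cephBlockPools above
  resources:
    requests:
      storage: 5Gi
EOF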
+ csi.storage.k8s.io/fstype: ext4 + +cephObjectStores: + - name: ceph-objectstore + # see https://github.com/rook/rook/blob/master/Documentation/ceph-object-store-crd.md#object-store-settings for available configuration + spec: + metadataPool: + failureDomain: host + replicated: + size: 3 + dataPool: + failureDomain: host + erasureCoded: + dataChunks: 2 + codingChunks: 1 + preservePoolsOnDelete: true + gateway: + port: 80 + # securePort: 443 + # sslCertificateRef: + instances: 1 + healthCheck: + bucket: + interval: 60s + storageClass: + enabled: true + name: ceph-bucket + reclaimPolicy: Delete + # see https://github.com/rook/rook/blob/master/Documentation/ceph-object-bucket-claim.md#storageclass for available configuration + parameters: + # note: objectStoreNamespace and objectStoreName are configured by the chart + region: us-east-1 diff --git a/cluster/charts/rook-ceph/Chart.yaml b/cluster/charts/rook-ceph/Chart.yaml index 715026d34c64..6b19f642b21b 100644 --- a/cluster/charts/rook-ceph/Chart.yaml +++ b/cluster/charts/rook-ceph/Chart.yaml @@ -2,6 +2,7 @@ apiVersion: v2 description: File, Block, and Object Storage Services for your Cloud-Native Environment name: rook-ceph version: 0.0.1 +appVersion: 0.0.1 icon: https://rook.io/images/rook-logo.svg sources: - https://github.com/rook/rook diff --git a/cluster/charts/rook-ceph/templates/clusterrole.yaml b/cluster/charts/rook-ceph/templates/clusterrole.yaml index 790a83b87c34..94426c4028ea 100644 --- a/cluster/charts/rook-ceph/templates/clusterrole.yaml +++ b/cluster/charts/rook-ceph/templates/clusterrole.yaml @@ -183,6 +183,7 @@ rules: - configmaps - nodes - nodes/proxy + - persistentvolumes verbs: - get - list @@ -197,6 +198,14 @@ rules: - list - get - watch +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch --- # Aspects of ceph-mgr that require access to the system namespace kind: ClusterRole @@ -255,24 +264,6 @@ rules: verbs: - get - list -# Use a default dict to avoid 'can't give argument to non-function' errors from text/template -{{- if ne ((.Values.agent | default (dict "mountSecurityMode" "")).mountSecurityMode | default "") "Any" }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: rook-ceph-agent-mount - labels: - operator: rook - storage-backend: ceph -rules: -- apiGroups: - - "" - resources: - - secrets - verbs: - - get -{{- end }} --- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 @@ -369,6 +360,9 @@ rules: - apiGroups: [""] resources: ["configmaps"] verbs: ["get", "list"] + - apiGroups: [""] + resources: ["serviceaccounts"] + verbs: ["get"] --- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 @@ -435,6 +429,9 @@ rules: - apiGroups: ["replication.storage.openshift.io"] resources: ["volumereplicationclasses/status"] verbs: ["get"] + - apiGroups: [""] + resources: ["serviceaccounts"] + verbs: ["get"] {{- end }} {{- if .Values.pspEnable }} --- diff --git a/cluster/charts/rook-ceph/templates/clusterrolebinding.yaml b/cluster/charts/rook-ceph/templates/clusterrolebinding.yaml index 36d539a25bf9..75fb36471be4 100644 --- a/cluster/charts/rook-ceph/templates/clusterrolebinding.yaml +++ b/cluster/charts/rook-ceph/templates/clusterrolebinding.yaml @@ -152,23 +152,6 @@ subjects: --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding -metadata: - name: rook-ceph-system-psp-users - labels: - operator: rook - storage-backend: ceph - chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" -roleRef: - apiGroup: 
rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-system-psp-user -subjects: -- kind: ServiceAccount - name: rook-ceph-system - namespace: {{ .Release.Namespace }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding metadata: name: rook-csi-cephfs-provisioner-sa-psp roleRef: diff --git a/cluster/charts/rook-ceph/templates/deployment.yaml b/cluster/charts/rook-ceph/templates/deployment.yaml index 2e067855c109..c67cb2be9c53 100644 --- a/cluster/charts/rook-ceph/templates/deployment.yaml +++ b/cluster/charts/rook-ceph/templates/deployment.yaml @@ -26,6 +26,11 @@ spec: image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" imagePullPolicy: {{ .Values.image.pullPolicy }} args: ["ceph", "operator"] + volumeMounts: + - mountPath: /var/lib/rook + name: rook-config + - mountPath: /etc/ceph + name: default-config-dir env: - name: ROOK_CURRENT_NAMESPACE_ONLY value: {{ .Values.currentNamespaceOnly | quote }} @@ -270,6 +275,10 @@ spec: - name: CSI_LOG_LEVEL value: {{ .Values.csi.logLevel | quote }} {{- end }} +{{- if .Values.csi.provisionerReplicas }} + - name: CSI_PROVISIONER_REPLICAS + value: {{ .Values.csi.provisionerReplicas | quote }} +{{- end }} {{- if .Values.csi.csiRBDProvisionerResource }} - name: CSI_RBD_PROVISIONER_RESOURCE value: {{ .Values.csi.csiRBDProvisionerResource | quote }} @@ -291,6 +300,8 @@ spec: value: "{{ .Values.enableFlexDriver }}" - name: ROOK_ENABLE_DISCOVERY_DAEMON value: "{{ .Values.enableDiscoveryDaemon }}" + - name: ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS + value: "{{ .Values.cephCommandsTimeoutSeconds }}" - name: ROOK_OBC_WATCH_OPERATOR_NAMESPACE value: "{{ .Values.enableOBCWatchOperatorNamespace }}" @@ -338,3 +349,8 @@ spec: {{- if .Values.rbacEnable }} serviceAccountName: rook-ceph-system {{- end }} + volumes: + - name: rook-config + emptyDir: {} + - name: default-config-dir + emptyDir: {} diff --git a/cluster/charts/rook-ceph/templates/resources.yaml b/cluster/charts/rook-ceph/templates/resources.yaml index b522ae50afb6..12a9bd70f22e 100644 --- a/cluster/charts/rook-ceph/templates/resources.yaml +++ b/cluster/charts/rook-ceph/templates/resources.yaml @@ -1,5 +1,5 @@ {{- if .Values.crds.enabled }} -{{- if semverCompare ">=1.16.0" .Capabilities.KubeVersion.GitVersion }} +{{- if semverCompare ">=1.16.0-0" .Capabilities.KubeVersion.GitVersion }} apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -34,8 +34,7 @@ spec: description: PoolSpec represents the spec of ceph pool properties: compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' enum: - none - passive @@ -62,13 +61,11 @@ spec: description: The algorithm for erasure coding type: string codingChunks: - description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 + description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). This is the number of OSDs that can be lost simultaneously before data cannot be recovered. 
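The operator deployment above now forwards two new chart values into the container environment: csi.provisionerReplicas (CSI_PROVISIONER_REPLICAS) and cephCommandsTimeoutSeconds (ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS). A sketch of setting them on the rook-ceph operator chart; the chosen numbers are illustrative, not defaults confirmed by this diff:

helm upgrade --install rook-ceph rook-release/rook-ceph \
  --create-namespace --namespace rook-ceph \
  --set csi.provisionerReplicas=2 \
  --set cephCommandsTimeoutSeconds=15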
minimum: 0 type: integer dataChunks: - description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 + description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). The number of chunks required to recover an object when any single OSD is lost is the same as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery. minimum: 0 type: integer required: @@ -87,6 +84,16 @@ spec: mode: description: 'Mode is the mirroring mode: either pool or image' type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object snapshotSchedules: description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools items: @@ -802,6 +809,7 @@ spec: type: boolean count: description: Count is the number of Ceph monitors + maximum: 9 minimum: 0 type: integer stretchCluster: @@ -1234,7 +1242,6 @@ spec: description: HostNetwork to enable host network type: boolean ipFamily: - default: IPv4 description: IPFamily is the single stack IPv6 or IPv4 protocol enum: - IPv4 @@ -1877,6 +1884,7 @@ spec: config: additionalProperties: type: string + nullable: true type: object x-kubernetes-preserve-unknown-fields: true fullpath: @@ -1911,6 +1919,7 @@ spec: config: additionalProperties: type: string + nullable: true type: object x-kubernetes-preserve-unknown-fields: true fullpath: @@ -2122,6 +2131,8 @@ spec: type: object nullable: true type: array + onlyApplyOSDPlacement: + type: boolean storageClassDeviceSets: items: description: StorageClassDeviceSet is a storage class device set @@ -4444,8 +4455,7 @@ spec: description: PoolSpec represents the spec of ceph pool properties: compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' enum: - none - passive @@ -4472,13 +4482,11 @@ spec: description: The algorithm for erasure coding type: string codingChunks: - description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 + description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). This is the number of OSDs that can be lost simultaneously before data cannot be recovered. minimum: 0 type: integer dataChunks: - description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 + description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). The number of chunks required to recover an object when any single OSD is lost is the same as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery. 
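Since compressionMode is now documented as deprecated in favor of the pool Parameters map (and its kubebuilder default was dropped so it no longer overrides Parameters), compression is best expressed through parameters. A sketch of a pool doing that; the pool name is hypothetical:

cat <<'EOF' | kubectl apply -f -
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: compressed-pool          # hypothetical name
  namespace: rook-ceph
spec:
  failureDomain: host
  replicated:
    size: 3
  parameters:
    compression_mode: aggressive # replaces the deprecated spec.compressionMode field
EOF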
minimum: 0 type: integer required: @@ -4497,6 +4505,16 @@ spec: mode: description: 'Mode is the mirroring mode: either pool or image' type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object snapshotSchedules: description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools items: @@ -4602,8 +4620,7 @@ spec: nullable: true properties: compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' enum: - none - passive @@ -4630,13 +4647,11 @@ spec: description: The algorithm for erasure coding type: string codingChunks: - description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 + description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). This is the number of OSDs that can be lost simultaneously before data cannot be recovered. minimum: 0 type: integer dataChunks: - description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 + description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). The number of chunks required to recover an object when any single OSD is lost is the same as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery. minimum: 0 type: integer required: @@ -4655,6 +4670,16 @@ spec: mode: description: 'Mode is the mirroring mode: either pool or image' type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object snapshotSchedules: description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools items: @@ -5622,16 +5647,179 @@ spec: properties: rados: description: RADOS is the Ganesha RADOS specification + nullable: true properties: namespace: description: Namespace is the RADOS namespace where NFS client recovery data is stored. type: string pool: - description: Pool is the RADOS pool where NFS client recovery data is stored. + description: Pool used to represent the Ganesha's pool name in version older than 16.2.7 As of Ceph Pacific 16.2.7, NFS Ganesha's pool name is hardcoded to ".nfs", so this setting will be ignored. type: string + poolConfig: + description: PoolConfig is the RADOS pool where Ganesha data is stored. 
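The new mirroring.peers.secretNames field in the pool schema lets a pool reference Kubernetes Secrets holding rbd-mirror or cephfs-mirror peer tokens. A sketch under the assumption that a peer-token Secret already exists; the pool and secret names are placeholders:

cat <<'EOF' | kubectl apply -f -
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: mirrored-pool            # hypothetical name
  namespace: rook-ceph
spec:
  replicated:
    size: 3
  mirroring:
    enabled: true
    mode: image
    peers:
      secretNames:
        - remote-cluster-peer    # Secret created from the remote cluster's bootstrap token (assumed)
EOF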
+ nullable: true + properties: + compressionMode: + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' + enum: + - none + - passive + - aggressive + - force + - "" + nullable: true + type: string + crushRoot: + description: The root of the crush hierarchy utilized by the pool + nullable: true + type: string + deviceClass: + description: The device class the OSD should set to for use in the pool + nullable: true + type: string + enableRBDStats: + description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool + type: boolean + erasureCoded: + description: The erasure code settings + properties: + algorithm: + description: The algorithm for erasure coding + type: string + codingChunks: + description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). This is the number of OSDs that can be lost simultaneously before data cannot be recovered. + minimum: 0 + type: integer + dataChunks: + description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). The number of chunks required to recover an object when any single OSD is lost is the same as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery. + minimum: 0 + type: integer + required: + - codingChunks + - dataChunks + type: object + failureDomain: + description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' + type: string + mirroring: + description: The mirroring settings + properties: + enabled: + description: Enabled whether this pool is mirrored or not + type: boolean + mode: + description: 'Mode is the mirroring mode: either pool or image' + type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object + snapshotSchedules: + description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools + items: + description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity of the snapshot. 
+ type: string + path: + description: Path is the path to snapshot, only valid for CephFS + type: string + startTime: + description: StartTime indicates when to start the snapshot + type: string + type: object + type: array + type: object + parameters: + additionalProperties: + type: string + description: Parameters is a list of properties to enable on a given pool + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + quotas: + description: The quota settings + nullable: true + properties: + maxBytes: + description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize + format: int64 + type: integer + maxObjects: + description: MaxObjects represents the quota in objects + format: int64 + type: integer + maxSize: + description: MaxSize represents the quota in bytes as a string + pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ + type: string + type: object + replicated: + description: The replication settings + properties: + hybridStorage: + description: HybridStorage represents hybrid storage tier settings + nullable: true + properties: + primaryDeviceClass: + description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD + minLength: 1 + type: string + secondaryDeviceClass: + description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs + minLength: 1 + type: string + required: + - primaryDeviceClass + - secondaryDeviceClass + type: object + replicasPerFailureDomain: + description: ReplicasPerFailureDomain the number of replica in the specified failure domain + minimum: 1 + type: integer + requireSafeReplicaSize: + description: RequireSafeReplicaSize if false allows you to set replica 1 + type: boolean + size: + description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) + minimum: 0 + type: integer + subFailureDomain: + description: SubFailureDomain the name of the sub-failure domain + type: string + targetSizeRatio: + description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity + type: number + required: + - size + type: object + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health check of an object store bucket + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object required: - namespace - - pool type: object server: description: Server is the Ganesha Server specification @@ -6224,7 +6412,6 @@ spec: - active type: object required: - - rados - server type: object status: @@ -6353,8 +6540,7 @@ spec: nullable: true properties: compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' enum: - none - passive @@ -6381,13 +6567,11 @@ spec: description: The algorithm for erasure coding type: string codingChunks: - description: 
Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 + description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). This is the number of OSDs that can be lost simultaneously before data cannot be recovered. minimum: 0 type: integer dataChunks: - description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 + description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). The number of chunks required to recover an object when any single OSD is lost is the same as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery. minimum: 0 type: integer required: @@ -6406,6 +6590,16 @@ spec: mode: description: 'Mode is the mirroring mode: either pool or image' type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object snapshotSchedules: description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools items: @@ -6515,6 +6709,10 @@ spec: nullable: true type: object x-kubernetes-preserve-unknown-fields: true + caBundleRef: + description: The name of the secret that stores custom ca-bundle with root and intermediate certificates. + nullable: true + type: string externalRgwEndpoints: description: ExternalRgwEndpoints points to external rgw endpoint(s) items: @@ -7272,14 +7470,109 @@ spec: type: integer type: object type: object + readinessProbe: + description: ProbeSpec is a wrapper around Probe so it can be enabled or disabled for a Ceph daemon + properties: + disabled: + description: Disabled determines whether probe is disable or not + type: boolean + probe: + description: Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + type: object type: object metadataPool: description: The metadata pool settings nullable: true properties: compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' enum: - none - passive @@ -7306,13 +7599,11 @@ spec: description: The algorithm for erasure coding type: string codingChunks: - description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 + description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). This is the number of OSDs that can be lost simultaneously before data cannot be recovered. minimum: 0 type: integer dataChunks: - description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 + description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). The number of chunks required to recover an object when any single OSD is lost is the same as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery. minimum: 0 type: integer required: @@ -7331,6 +7622,16 @@ spec: mode: description: 'Mode is the mirroring mode: either pool or image' type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object snapshotSchedules: description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools items: @@ -7564,9 +7865,76 @@ spec: spec: description: ObjectStoreUserSpec represent the spec of an Objectstoreuser properties: + capabilities: + description: Additional admin-level capabilities for the Ceph object store user + nullable: true + properties: + bucket: + description: Admin capabilities to read/write Ceph object store buckets. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + enum: + - '*' + - read + - write + - read, write + type: string + metadata: + description: Admin capabilities to read/write Ceph object store metadata. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + enum: + - '*' + - read + - write + - read, write + type: string + usage: + description: Admin capabilities to read/write Ceph object store usage. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + enum: + - '*' + - read + - write + - read, write + type: string + user: + description: Admin capabilities to read/write Ceph object store users. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + enum: + - '*' + - read + - write + - read, write + type: string + zone: + description: Admin capabilities to read/write Ceph object store zones. 
Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + enum: + - '*' + - read + - write + - read, write + type: string + type: object displayName: description: The display name for the ceph users type: string + quotas: + description: ObjectUserQuotaSpec can be used to set quotas for the object store user to limit their usage. See the [Ceph docs](https://docs.ceph.com/en/latest/radosgw/admin/?#quota-management) for more + nullable: true + properties: + maxBuckets: + description: Maximum bucket limit for the ceph user + nullable: true + type: integer + maxObjects: + description: Maximum number of objects across all the user's buckets + format: int64 + nullable: true + type: integer + maxSize: + anyOf: + - type: integer + - type: string + description: Maximum size limit of all objects across all the user's buckets See https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity for more info. + nullable: true + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object store: description: The store the user will be created in type: string @@ -7697,8 +8065,7 @@ spec: nullable: true properties: compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' enum: - none - passive @@ -7725,13 +8092,11 @@ spec: description: The algorithm for erasure coding type: string codingChunks: - description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 + description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). This is the number of OSDs that can be lost simultaneously before data cannot be recovered. minimum: 0 type: integer dataChunks: - description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 + description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). The number of chunks required to recover an object when any single OSD is lost is the same as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery. 
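The CephObjectStoreUser schema above gains capabilities and quotas, matching the radosgw admin caps and quota settings it links to. A sketch combining both; the user name, store reference, and limits are placeholders:

cat <<'EOF' | kubectl apply -f -
apiVersion: ceph.rook.io/v1
kind: CephObjectStoreUser
metadata:
  name: example-user             # hypothetical name
  namespace: rook-ceph
spec:
  store: ceph-objectstore        # the store defined in the cluster chart values above
  displayName: Example User
  capabilities:
    user: read                   # admin read access to user metadata
    bucket: read, write          # admin read/write access to buckets
  quotas:
    maxBuckets: 100
    maxObjects: 10000
    maxSize: 10G
EOF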
minimum: 0 type: integer required: @@ -7750,6 +8115,16 @@ spec: mode: description: 'Mode is the mirroring mode: either pool or image' type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object snapshotSchedules: description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools items: @@ -7853,8 +8228,7 @@ spec: nullable: true properties: compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' enum: - none - passive @@ -7881,13 +8255,11 @@ spec: description: The algorithm for erasure coding type: string codingChunks: - description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 + description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). This is the number of OSDs that can be lost simultaneously before data cannot be recovered. minimum: 0 type: integer dataChunks: - description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 + description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). The number of chunks required to recover an object when any single OSD is lost is the same as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery. minimum: 0 type: integer required: @@ -7906,6 +8278,16 @@ spec: mode: description: 'Mode is the mirroring mode: either pool or image' type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object snapshotSchedules: description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools items: @@ -8803,228 +9185,6 @@ spec: --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - helm.sh/resource-policy: keep - creationTimestamp: null - name: volumereplicationclasses.replication.storage.openshift.io -spec: - group: replication.storage.openshift.io - names: - kind: VolumeReplicationClass - listKind: VolumeReplicationClassList - plural: volumereplicationclasses - shortNames: - - vrc - singular: volumereplicationclass - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .spec.provisioner - name: provisioner - type: string - name: v1alpha1 - schema: - openAPIV3Schema: - description: VolumeReplicationClass is the Schema for the volumereplicationclasses API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: VolumeReplicationClassSpec specifies parameters that an underlying storage system uses when creating a volume replica. A specific VolumeReplicationClass is used by specifying its name in a VolumeReplication object. - properties: - parameters: - additionalProperties: - type: string - description: Parameters is a key-value map with storage provisioner specific configurations for creating volume replicas - type: object - provisioner: - description: Provisioner is the name of storage provisioner - type: string - required: - - provisioner - type: object - status: - description: VolumeReplicationClassStatus defines the observed state of VolumeReplicationClass - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - helm.sh/resource-policy: keep - creationTimestamp: null - name: volumereplications.replication.storage.openshift.io -spec: - group: replication.storage.openshift.io - names: - kind: VolumeReplication - listKind: VolumeReplicationList - plural: volumereplications - shortNames: - - vr - singular: volumereplication - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .spec.volumeReplicationClass - name: volumeReplicationClass - type: string - - jsonPath: .spec.dataSource.name - name: pvcName - type: string - - jsonPath: .spec.replicationState - name: desiredState - type: string - - jsonPath: .status.state - name: currentState - type: string - name: v1alpha1 - schema: - openAPIV3Schema: - description: VolumeReplication is the Schema for the volumereplications API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: VolumeReplicationSpec defines the desired state of VolumeReplication - properties: - dataSource: - description: DataSource represents the object associated with the volume - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. 
- type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - replicationState: - description: ReplicationState represents the replication operation to be performed on the volume. Supported operations are "primary", "secondary" and "resync" - enum: - - primary - - secondary - - resync - type: string - volumeReplicationClass: - description: VolumeReplicationClass is the VolumeReplicationClass name for this VolumeReplication resource - type: string - required: - - dataSource - - replicationState - - volumeReplicationClass - type: object - status: - description: VolumeReplicationStatus defines the observed state of VolumeReplication - properties: - conditions: - description: Conditions are the list of conditions and their status. - items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - lastCompletionTime: - format: date-time - type: string - lastStartTime: - format: date-time - type: string - message: - type: string - observedGeneration: - description: observedGeneration is the last generation change the operator has dealt with - format: int64 - type: integer - state: - description: State captures the latest state of the replication operation - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c @@ -9777,6 +9937,10 @@ spec: enum: - image - pool + peers: + properties: + secretNames: + type: array snapshotSchedules: type: object properties: @@ -9867,5 +10031,6 @@ spec: version: v1 subresources: status: {} + {{- end }} {{- end }} diff --git a/cluster/charts/rook-ceph/templates/role.yaml b/cluster/charts/rook-ceph/templates/role.yaml index f4b2fbf6d2b2..c27c11cd5f49 100644 --- a/cluster/charts/rook-ceph/templates/role.yaml +++ b/cluster/charts/rook-ceph/templates/role.yaml @@ -106,6 +106,20 @@ rules: - "*" verbs: - "*" +- apiGroups: + - apps + resources: + - deployments/scale + - deployments + verbs: + - patch + - delete +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - delete --- kind: Role apiVersion: rbac.authorization.k8s.io/v1 @@ -174,5 +188,5 @@ rules: verbs: ["get", "list", "delete" ] - apiGroups: [""] resources: ["persistentvolumeclaims"] - verbs: ["delete"] + verbs: ["get", "update", "delete"] {{- end }} diff --git a/cluster/charts/rook-ceph/values.yaml b/cluster/charts/rook-ceph/values.yaml index 0a1ceb06d2cb..956930d2e049 100644 --- a/cluster/charts/rook-ceph/values.yaml +++ b/cluster/charts/rook-ceph/values.yaml @@ -78,7 +78,7 @@ csi: # (Optional) policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted. # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html - cephFSFSGroupPolicy: "ReadWriteOnceWithFSType" + cephFSFSGroupPolicy: "None" # OMAP generator generates the omap mapping between the PV name and the RBD image # which helps CSI to identify the rbd images for CSI operations. @@ -87,6 +87,9 @@ csi: # sidecar with CSI provisioner pod, to enable set it to true. enableOMAPGenerator: false + # Set replicas for csi provisioner deployment. + provisionerReplicas: 2 + # Set logging level for csi containers. # Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity. #logLevel: 0 @@ -270,7 +273,7 @@ csi: #rbdLivenessMetricsPort: 9080 #kubeletDirPath: /var/lib/kubelet #cephcsi: - #image: quay.io/cephcsi/cephcsi:v3.3.1 + #image: quay.io/cephcsi/cephcsi:v3.4.0 #registrar: #image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.2.0 #provisioner: @@ -285,13 +288,16 @@ csi: #cephfsPodLabels: "key1=value1,key2=value2" # Labels to add to the CSI RBD Deployments and DaemonSets Pods. #rbdPodLabels: "key1=value1,key2=value2" - # Enable volume replication controller + # Enable the volume replication controller. 
+ # Before enabling, ensure the Volume Replication CRDs are created. + # See https://rook.io/docs/rook/latest/ceph-csi-drivers.html#rbd-mirroring volumeReplication: enabled: false #image: "quay.io/csiaddons/volumereplication-operator:v0.1.0" enableFlexDriver: false enableDiscoveryDaemon: false +cephCommandsTimeoutSeconds: "15" # enable the ability to have multiple Ceph filesystems in the same cluster # WARNING: Experimental feature in Ceph Releases Octopus (v15) and Nautilus (v14) diff --git a/cluster/examples/kubernetes/cassandra/cluster.yaml b/cluster/examples/kubernetes/cassandra/cluster.yaml deleted file mode 100644 index 0cef61e3f5ad..000000000000 --- a/cluster/examples/kubernetes/cassandra/cluster.yaml +++ /dev/null @@ -1,93 +0,0 @@ -# Namespace where the Cassandra Cluster will be created -apiVersion: v1 -kind: Namespace -metadata: - name: rook-cassandra - ---- -# Role for cassandra members. -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: rook-cassandra-member - namespace: rook-cassandra -rules: - - apiGroups: - - "" - resources: - - pods - verbs: - - get - - apiGroups: - - "" - resources: - - services - verbs: - - get - - list - - patch - - watch - - apiGroups: - - cassandra.rook.io - resources: - - clusters - verbs: - - get - ---- -# ServiceAccount for cassandra members. -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-cassandra-member - namespace: rook-cassandra - ---- -# RoleBinding for cassandra members. -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: rook-cassandra-member - namespace: rook-cassandra -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-cassandra-member -subjects: - - kind: ServiceAccount - name: rook-cassandra-member - namespace: rook-cassandra - ---- -# Cassandra Cluster -apiVersion: cassandra.rook.io/v1alpha1 -kind: Cluster -metadata: - name: rook-cassandra - namespace: rook-cassandra -spec: - version: 3.11.6 - mode: cassandra - # A key/value list of annotations - annotations: - # key: value - datacenter: - name: us-east-1 - racks: - - name: us-east-1a - members: 3 - storage: - volumeClaimTemplates: - - metadata: - name: rook-cassandra-data - spec: - resources: - requests: - storage: 5Gi - resources: - requests: - cpu: 1 - memory: 2Gi - limits: - cpu: 1 - memory: 2Gi diff --git a/cluster/examples/kubernetes/cassandra/crds.yaml b/cluster/examples/kubernetes/cassandra/crds.yaml deleted file mode 100644 index d619be299609..000000000000 --- a/cluster/examples/kubernetes/cassandra/crds.yaml +++ /dev/null @@ -1,894 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - creationTimestamp: null - name: clusters.cassandra.rook.io -spec: - group: cassandra.rook.io - names: - kind: Cluster - listKind: ClusterList - plural: clusters - singular: cluster - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. 
Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: ClusterSpec is the desired state for a Cassandra Cluster. - properties: - annotations: - additionalProperties: - type: string - description: The annotations-related configuration to add/set on each Pod related object. - nullable: true - type: object - datacenter: - description: Datacenter that will make up this cluster. - nullable: true - properties: - name: - description: Name of the Cassandra Datacenter. Used in the cassandra-rackdc.properties file. - type: string - racks: - description: Racks of the specific Datacenter. - items: - description: RackSpec is the desired state for a Cassandra Rack. - properties: - annotations: - additionalProperties: - type: string - description: The annotations-related configuration to add/set on each Pod related object. - nullable: true - type: object - configMapName: - description: User-provided ConfigMap applied to the specific statefulset. - nullable: true - type: string - jmxExporterConfigMapName: - description: User-provided ConfigMap for jmx prometheus exporter - nullable: true - type: string - members: - description: Members is the number of Cassandra instances in this rack. - format: int32 - type: integer - name: - description: Name of the Cassandra Rack. Used in the cassandra-rackdc.properties file. - type: string - placement: - description: Placement describes restrictions for the nodes Cassandra is scheduled on. - nullable: true - properties: - nodeAffinity: - description: NodeAffinity is a group of node affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. - items: - description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, associated with the corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. 
This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. The terms are ORed. - items: - description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. 
If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: PodAffinity is a group of inter pod affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
- items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. 
- type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
- type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. 
- type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - tolerations: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator - items: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies to. 
Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - topologySpreadConstraints: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology - items: - description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. - properties: - labelSelector: - description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - maxSkew: - description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' 
- format: int32 - type: integer - topologyKey: - description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. - type: string - whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assigment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' - type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - type: array - type: object - resources: - description: Resources the Cassandra Pods will use. - nullable: true - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - storage: - description: Storage describes the underlying storage that Cassandra will consume. - properties: - nodes: - items: - description: Node is a storage nodes - properties: - name: - type: string - type: object - nullable: true - type: array - volumeClaimTemplates: - description: PersistentVolumeClaims to use as storage - items: - description: PersistentVolumeClaim is a user's request for and claim to a persistent volume - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' - properties: - annotations: - additionalProperties: - type: string - type: object - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - description: 'Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - accessModes: - description: 'AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.' - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - selector: - description: A label query over volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
- items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. - type: string - volumeName: - description: VolumeName is the binding reference to the PersistentVolume backing this claim. - type: string - type: object - status: - description: 'Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - accessModes: - description: 'AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - capacity: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: Represents the actual resources of the underlying volume. - type: object - conditions: - description: Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'. - items: - description: PersistentVolumeClaimCondition contails details about state of pvc - properties: - lastProbeTime: - description: Last time we probed the condition. - format: date-time - type: string - lastTransitionTime: - description: Last time the condition transitioned from one status to another. - format: date-time - type: string - message: - description: Human-readable message indicating details about last transition. - type: string - reason: - description: Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "ResizeStarted" that means the underlying persistent volume is being resized. 
- type: string - status: - type: string - type: - description: PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type - type: string - required: - - status - - type - type: object - type: array - phase: - description: Phase represents the current phase of PersistentVolumeClaim. - type: string - type: object - type: object - type: array - type: object - required: - - members - - name - type: object - type: array - required: - - name - - racks - type: object - mode: - description: Mode selects an operating mode. - type: string - repository: - description: Repository to pull the image from. - nullable: true - type: string - sidecarImage: - description: User-provided image for the sidecar that replaces default. - nullable: true - properties: - repository: - description: Repository to pull the image from. - type: string - version: - description: Version of the image. - type: string - required: - - version - type: object - version: - description: Version of Cassandra to use. - type: string - required: - - version - type: object - status: - description: ClusterStatus is the status of a Cassandra Cluster - nullable: true - properties: - racks: - additionalProperties: - description: RackStatus is the status of a Cassandra Rack - properties: - conditions: - description: Conditions are the latest available observations of a rack's state. - items: - description: RackCondition is an observation about the state of a rack. - properties: - status: - type: string - type: - type: string - required: - - status - - type - type: object - type: array - members: - description: Members is the current number of members requested in the specific Rack - format: int32 - type: integer - readyMembers: - description: ReadyMembers is the number of ready members in the specific Rack - format: int32 - type: integer - required: - - members - - readyMembers - type: object - type: object - type: object - required: - - metadata - - spec - type: object - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/cluster/examples/kubernetes/cassandra/operator.yaml b/cluster/examples/kubernetes/cassandra/operator.yaml deleted file mode 100644 index cc406664e856..000000000000 --- a/cluster/examples/kubernetes/cassandra/operator.yaml +++ /dev/null @@ -1,123 +0,0 @@ -# Namespace where Cassandra Operator will live -apiVersion: v1 -kind: Namespace -metadata: - name: rook-cassandra-system # namespace:operator ---- -# ClusterRole for cassandra-operator. -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: rook-cassandra-operator -rules: - - apiGroups: - - "" - resources: - - pods - verbs: - - get - - list - - watch - - delete - - apiGroups: - - "" - resources: - - services - verbs: - - "*" - - apiGroups: - - "" - resources: - - persistentvolumes - - persistentvolumeclaims - verbs: - - get - - delete - - apiGroups: - - "" - resources: - - nodes - verbs: - - get - - apiGroups: - - apps - resources: - - statefulsets - verbs: - - "*" - - apiGroups: - - policy - resources: - - poddisruptionbudgets - verbs: - - create - - apiGroups: - - cassandra.rook.io - resources: - - "*" - verbs: - - "*" - - apiGroups: - - "" - resources: - - events - verbs: - - create - - update - - patch ---- -# ServiceAccount for cassandra-operator. Serves as its authorization identity. 
-apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-cassandra-operator - namespace: rook-cassandra-system # namespace:operator ---- -# Bind cassandra-operator ServiceAccount with ClusterRole. -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-cassandra-operator -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-cassandra-operator -subjects: - - kind: ServiceAccount - name: rook-cassandra-operator - namespace: rook-cassandra-system # namespace:operator ---- -# cassandra-operator StatefulSet. -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: rook-cassandra-operator - namespace: rook-cassandra-system # namespace:operator - labels: - app: rook-cassandra-operator -spec: - replicas: 1 - serviceName: "non-existent-service" - selector: - matchLabels: - app: rook-cassandra-operator - template: - metadata: - labels: - app: rook-cassandra-operator - spec: - serviceAccountName: rook-cassandra-operator - containers: - - name: rook-cassandra-operator - image: rook/cassandra:master - imagePullPolicy: "Always" - args: ["cassandra", "operator"] - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace diff --git a/cluster/examples/kubernetes/ceph/cluster-external-management.yaml b/cluster/examples/kubernetes/ceph/cluster-external-management.yaml index c8cd5f90b118..8d50dcfd6492 100644 --- a/cluster/examples/kubernetes/ceph/cluster-external-management.yaml +++ b/cluster/examples/kubernetes/ceph/cluster-external-management.yaml @@ -19,4 +19,4 @@ spec: dataDirHostPath: /var/lib/rook # providing an image is required, if you want to create other CRs (rgw, mds, nfs) cephVersion: - image: quay.io/ceph/ceph:v16.2.5 # Should match external cluster version + image: quay.io/ceph/ceph:v16.2.6 # Should match external cluster version diff --git a/cluster/examples/kubernetes/ceph/cluster-on-local-pvc.yaml b/cluster/examples/kubernetes/ceph/cluster-on-local-pvc.yaml index 97bc0319a1a1..900030d76eb4 100644 --- a/cluster/examples/kubernetes/ceph/cluster-on-local-pvc.yaml +++ b/cluster/examples/kubernetes/ceph/cluster-on-local-pvc.yaml @@ -26,7 +26,8 @@ spec: accessModes: - ReadWriteOnce persistentVolumeReclaimPolicy: Retain - volumeMode: Block + # PV for mon must be a filesystem volume. + volumeMode: Filesystem local: # If you want to use dm devices like logical volume, please replace `/dev/sdb` with their device names like `/dev/vg-name/lv-name`. path: /dev/sdb @@ -50,6 +51,7 @@ spec: accessModes: - ReadWriteOnce persistentVolumeReclaimPolicy: Retain + # PV for OSD must be a block volume. 
volumeMode: Block local: path: /dev/sdc @@ -73,7 +75,7 @@ spec: accessModes: - ReadWriteOnce persistentVolumeReclaimPolicy: Retain - volumeMode: Block + volumeMode: Filesystem local: path: /dev/sdb nodeAffinity: @@ -119,7 +121,7 @@ spec: accessModes: - ReadWriteOnce persistentVolumeReclaimPolicy: Retain - volumeMode: Block + volumeMode: Filesystem local: path: /dev/sdb nodeAffinity: @@ -171,7 +173,7 @@ spec: requests: storage: 10Gi cephVersion: - image: quay.io/ceph/ceph:v16.2.5 + image: quay.io/ceph/ceph:v16.2.6 allowUnsupported: false skipUpgradeChecks: false continueUpgradeAfterChecksEvenIfNotHealthy: false @@ -193,6 +195,18 @@ spec: tuneDeviceClass: true tuneFastDeviceClass: false encrypted: false + placement: + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - rook-ceph-osd + - rook-ceph-osd-prepare preparePlacement: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: @@ -208,6 +222,7 @@ spec: operator: In values: - rook-ceph-osd-prepare + topologyKey: kubernetes.io/hostname resources: # These are the OSD daemon limits. For OSD prepare limits, see the separate section below for "prepareosd" resources # limits: @@ -231,6 +246,8 @@ spec: volumeMode: Block accessModes: - ReadWriteOnce + # when onlyApplyOSDPlacement is false, will merge both placement.All() and storageClassDeviceSets.Placement + onlyApplyOSDPlacement: false resources: # prepareosd: # limits: diff --git a/cluster/examples/kubernetes/ceph/cluster-on-pvc.yaml b/cluster/examples/kubernetes/ceph/cluster-on-pvc.yaml index 677ed31e5297..2c612596bff9 100644 --- a/cluster/examples/kubernetes/ceph/cluster-on-pvc.yaml +++ b/cluster/examples/kubernetes/ceph/cluster-on-pvc.yaml @@ -14,7 +14,8 @@ metadata: spec: dataDirHostPath: /var/lib/rook mon: - # Set the number of mons to be started. Must be an odd number, and is generally recommended to be 3. + # Set the number of mons to be started. Generally recommended to be 3. + # For highest availability, an odd number of mons should be specified. count: 3 # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason. # Mons should only be allowed on the same node for test environments where data loss is acceptable. @@ -32,7 +33,7 @@ spec: requests: storage: 10Gi cephVersion: - image: quay.io/ceph/ceph:v16.2.5 + image: quay.io/ceph/ceph:v16.2.6 allowUnsupported: false skipUpgradeChecks: false continueUpgradeAfterChecksEvenIfNotHealthy: false @@ -158,6 +159,8 @@ spec: # - ReadWriteOnce # Scheduler name for OSD pod placement # schedulerName: osd-scheduler + # when onlyApplyOSDPlacement is false, will merge both placement.All() and storageClassDeviceSets.Placement. 
+ onlyApplyOSDPlacement: false resources: # prepareosd: # limits: diff --git a/cluster/examples/kubernetes/ceph/cluster-stretched.yaml b/cluster/examples/kubernetes/ceph/cluster-stretched.yaml index d26ca53b1601..57a33b3fb7bb 100644 --- a/cluster/examples/kubernetes/ceph/cluster-stretched.yaml +++ b/cluster/examples/kubernetes/ceph/cluster-stretched.yaml @@ -39,7 +39,7 @@ spec: count: 2 cephVersion: # Stretch cluster support upstream is only available starting in Ceph Pacific - image: quay.io/ceph/ceph:v16.2.5 + image: quay.io/ceph/ceph:v16.2.6 allowUnsupported: true skipUpgradeChecks: false continueUpgradeAfterChecksEvenIfNotHealthy: false diff --git a/cluster/examples/kubernetes/ceph/cluster-test.yaml b/cluster/examples/kubernetes/ceph/cluster-test.yaml index 9f602c5b28db..0855f95bddb2 100644 --- a/cluster/examples/kubernetes/ceph/cluster-test.yaml +++ b/cluster/examples/kubernetes/ceph/cluster-test.yaml @@ -28,7 +28,7 @@ metadata: spec: dataDirHostPath: /var/lib/rook cephVersion: - image: quay.io/ceph/ceph:v16 + image: quay.io/ceph/ceph:v16.2.6 allowUnsupported: true mon: count: 1 diff --git a/cluster/examples/kubernetes/ceph/cluster.yaml b/cluster/examples/kubernetes/ceph/cluster.yaml index 24831ab32da1..0b52bce594f1 100644 --- a/cluster/examples/kubernetes/ceph/cluster.yaml +++ b/cluster/examples/kubernetes/ceph/cluster.yaml @@ -16,13 +16,13 @@ spec: cephVersion: # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw). - # v13 is mimic, v14 is nautilus, and v15 is octopus. + # v14 is nautilus, v15 is octopus, and v16 is pacific. # RECOMMENDATION: In production, use a specific version tag instead of the general v14 flag, which pulls the latest release and could result in different # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/. - # If you want to be more precise, you can always use a timestamp tag such quay.io/ceph/ceph:v16.2.5-20210708 + # If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v16.2.6-20210918 # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities - image: quay.io/ceph/ceph:v16.2.5 - # Whether to allow unsupported versions of Ceph. Currently `nautilus` and `octopus` are supported. + image: quay.io/ceph/ceph:v16.2.6 + # Whether to allow unsupported versions of Ceph. Currently `nautilus`, `octopus`, and `pacific` are supported. # Future versions such as `pacific` would require this to be set to `true`. # Do not set to true in production. allowUnsupported: false @@ -44,7 +44,8 @@ spec: # The default wait timeout is 10 minutes. waitTimeoutForHealthyOSDInMinutes: 10 mon: - # Set the number of mons to be started. Must be an odd number, and is generally recommended to be 3. + # Set the number of mons to be started. Generally recommended to be 3. + # For highest availability, an odd number of mons should be specified. count: 3 # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason. # Mons should only be allowed on the same node for test environments where data loss is acceptable. @@ -177,6 +178,7 @@ spec: # monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by operator.
# These labels can be passed as LabelSelector to Prometheus # monitoring: +# crashcollector: resources: # The requests and limits set here, allow the mgr pod to use half of one CPU core and 1 gigabyte of memory # mgr: @@ -218,17 +220,19 @@ spec: # encryptedDevice: "true" # the default value for this option is "false" # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label. -# nodes: -# - name: "172.17.4.201" -# devices: # specific devices to use for storage can be specified for each node -# - name: "sdb" -# - name: "nvme01" # multiple osds can be created on high performance devices -# config: -# osdsPerDevice: "5" -# - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths -# config: # configuration can be specified at the node level which overrides the cluster level config -# - name: "172.17.4.301" -# deviceFilter: "^sd." + # nodes: + # - name: "172.17.4.201" + # devices: # specific devices to use for storage can be specified for each node + # - name: "sdb" + # - name: "nvme01" # multiple osds can be created on high performance devices + # config: + # osdsPerDevice: "5" + # - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths + # config: # configuration can be specified at the node level which overrides the cluster level config + # - name: "172.17.4.301" + # deviceFilter: "^sd." + # when onlyApplyOSDPlacement is false, will merge both placement.All() and placement.osd + onlyApplyOSDPlacement: false # The section for configuring management of daemon disruptions during upgrade or fencing. disruptionManagement: # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. 
OSD PDBs are managed dynamically diff --git a/cluster/examples/kubernetes/ceph/common-second-cluster.yaml b/cluster/examples/kubernetes/ceph/common-second-cluster.yaml index cbe12e5337dd..e88e4230336d 100644 --- a/cluster/examples/kubernetes/ceph/common-second-cluster.yaml +++ b/cluster/examples/kubernetes/ceph/common-second-cluster.yaml @@ -145,7 +145,7 @@ rules: verbs: ["get", "list", "delete"] - apiGroups: [""] resources: ["persistentvolumeclaims"] - verbs: ["delete"] + verbs: ["get", "update", "delete"] --- # Allow the osd purge job to run in this namespace kind: RoleBinding diff --git a/cluster/examples/kubernetes/ceph/common.yaml b/cluster/examples/kubernetes/ceph/common.yaml index 47bb8865a894..7a082c0581e0 100644 --- a/cluster/examples/kubernetes/ceph/common.yaml +++ b/cluster/examples/kubernetes/ceph/common.yaml @@ -290,6 +290,7 @@ rules: - configmaps - nodes - nodes/proxy + - persistentvolumes verbs: - get - list @@ -304,6 +305,14 @@ rules: - list - get - watch + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch --- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 @@ -520,6 +529,20 @@ rules: - "*" verbs: - "*" + - apiGroups: + - apps + resources: + - deployments/scale + - deployments + verbs: + - patch + - delete + - apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - delete # OLM: END CLUSTER ROLE # OLM: BEGIN CMD REPORTER ROLE --- @@ -1063,6 +1086,9 @@ rules: - apiGroups: [""] resources: ["configmaps"] verbs: ["get", "list"] + - apiGroups: [""] + resources: ["serviceaccounts"] + verbs: ["get"] --- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 @@ -1129,6 +1155,9 @@ rules: - apiGroups: ["replication.storage.openshift.io"] resources: ["volumereplicationclasses/status"] verbs: ["get"] + - apiGroups: [""] + resources: ["serviceaccounts"] + verbs: ["get"] # OLM: END CSI RBD CLUSTER ROLE # OLM: BEGIN CSI RBD CLUSTER ROLEBINDING --- @@ -1203,7 +1232,7 @@ rules: verbs: ["get", "list", "delete"] - apiGroups: [""] resources: ["persistentvolumeclaims"] - verbs: ["delete"] + verbs: ["get", "update", "delete"] --- # Allow the osd purge job to run in this namespace kind: RoleBinding diff --git a/cluster/examples/kubernetes/ceph/crds.yaml b/cluster/examples/kubernetes/ceph/crds.yaml index 0a23ac5260a5..7693a9421ea7 100644 --- a/cluster/examples/kubernetes/ceph/crds.yaml +++ b/cluster/examples/kubernetes/ceph/crds.yaml @@ -36,8 +36,7 @@ spec: description: PoolSpec represents the spec of ceph pool properties: compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' enum: - none - passive @@ -64,13 +63,11 @@ spec: description: The algorithm for erasure coding type: string codingChunks: - description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 + description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). This is the number of OSDs that can be lost simultaneously before data cannot be recovered. 
minimum: 0 type: integer dataChunks: - description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 + description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). The number of chunks required to recover an object when any single OSD is lost is the same as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery. minimum: 0 type: integer required: @@ -89,6 +86,16 @@ spec: mode: description: 'Mode is the mirroring mode: either pool or image' type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object snapshotSchedules: description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools items: @@ -802,6 +809,7 @@ spec: type: boolean count: description: Count is the number of Ceph monitors + maximum: 9 minimum: 0 type: integer stretchCluster: @@ -1234,7 +1242,6 @@ spec: description: HostNetwork to enable host network type: boolean ipFamily: - default: IPv4 description: IPFamily is the single stack IPv6 or IPv4 protocol enum: - IPv4 @@ -1877,6 +1884,7 @@ spec: config: additionalProperties: type: string + nullable: true type: object x-kubernetes-preserve-unknown-fields: true fullpath: @@ -1911,6 +1919,7 @@ spec: config: additionalProperties: type: string + nullable: true type: object x-kubernetes-preserve-unknown-fields: true fullpath: @@ -2122,6 +2131,8 @@ spec: type: object nullable: true type: array + onlyApplyOSDPlacement: + type: boolean storageClassDeviceSets: items: description: StorageClassDeviceSet is a storage class device set @@ -4442,8 +4453,7 @@ spec: description: PoolSpec represents the spec of ceph pool properties: compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' enum: - none - passive @@ -4470,13 +4480,11 @@ spec: description: The algorithm for erasure coding type: string codingChunks: - description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 + description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). This is the number of OSDs that can be lost simultaneously before data cannot be recovered. minimum: 0 type: integer dataChunks: - description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 + description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). The number of chunks required to recover an object when any single OSD is lost is the same as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery. 
minimum: 0 type: integer required: @@ -4495,6 +4503,16 @@ spec: mode: description: 'Mode is the mirroring mode: either pool or image' type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object snapshotSchedules: description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools items: @@ -4600,8 +4618,7 @@ spec: nullable: true properties: compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' enum: - none - passive @@ -4628,13 +4645,11 @@ spec: description: The algorithm for erasure coding type: string codingChunks: - description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 + description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). This is the number of OSDs that can be lost simultaneously before data cannot be recovered. minimum: 0 type: integer dataChunks: - description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 + description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). The number of chunks required to recover an object when any single OSD is lost is the same as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery. minimum: 0 type: integer required: @@ -4653,6 +4668,16 @@ spec: mode: description: 'Mode is the mirroring mode: either pool or image' type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object snapshotSchedules: description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools items: @@ -5619,16 +5644,179 @@ spec: properties: rados: description: RADOS is the Ganesha RADOS specification + nullable: true properties: namespace: description: Namespace is the RADOS namespace where NFS client recovery data is stored. type: string pool: - description: Pool is the RADOS pool where NFS client recovery data is stored. + description: Pool used to represent the Ganesha's pool name in version older than 16.2.7 As of Ceph Pacific 16.2.7, NFS Ganesha's pool name is hardcoded to ".nfs", so this setting will be ignored. type: string + poolConfig: + description: PoolConfig is the RADOS pool where Ganesha data is stored. 
+ nullable: true + properties: + compressionMode: + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' + enum: + - none + - passive + - aggressive + - force + - "" + nullable: true + type: string + crushRoot: + description: The root of the crush hierarchy utilized by the pool + nullable: true + type: string + deviceClass: + description: The device class the OSD should set to for use in the pool + nullable: true + type: string + enableRBDStats: + description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool + type: boolean + erasureCoded: + description: The erasure code settings + properties: + algorithm: + description: The algorithm for erasure coding + type: string + codingChunks: + description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). This is the number of OSDs that can be lost simultaneously before data cannot be recovered. + minimum: 0 + type: integer + dataChunks: + description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). The number of chunks required to recover an object when any single OSD is lost is the same as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery. + minimum: 0 + type: integer + required: + - codingChunks + - dataChunks + type: object + failureDomain: + description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' + type: string + mirroring: + description: The mirroring settings + properties: + enabled: + description: Enabled whether this pool is mirrored or not + type: boolean + mode: + description: 'Mode is the mirroring mode: either pool or image' + type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object + snapshotSchedules: + description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools + items: + description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity of the snapshot. 
+ type: string + path: + description: Path is the path to snapshot, only valid for CephFS + type: string + startTime: + description: StartTime indicates when to start the snapshot + type: string + type: object + type: array + type: object + parameters: + additionalProperties: + type: string + description: Parameters is a list of properties to enable on a given pool + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + quotas: + description: The quota settings + nullable: true + properties: + maxBytes: + description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize + format: int64 + type: integer + maxObjects: + description: MaxObjects represents the quota in objects + format: int64 + type: integer + maxSize: + description: MaxSize represents the quota in bytes as a string + pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ + type: string + type: object + replicated: + description: The replication settings + properties: + hybridStorage: + description: HybridStorage represents hybrid storage tier settings + nullable: true + properties: + primaryDeviceClass: + description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD + minLength: 1 + type: string + secondaryDeviceClass: + description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs + minLength: 1 + type: string + required: + - primaryDeviceClass + - secondaryDeviceClass + type: object + replicasPerFailureDomain: + description: ReplicasPerFailureDomain the number of replica in the specified failure domain + minimum: 1 + type: integer + requireSafeReplicaSize: + description: RequireSafeReplicaSize if false allows you to set replica 1 + type: boolean + size: + description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) + minimum: 0 + type: integer + subFailureDomain: + description: SubFailureDomain the name of the sub-failure domain + type: string + targetSizeRatio: + description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity + type: number + required: + - size + type: object + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health check of an object store bucket + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object required: - namespace - - pool type: object server: description: Server is the Ganesha Server specification @@ -6221,7 +6409,6 @@ spec: - active type: object required: - - rados - server type: object status: @@ -6348,8 +6535,7 @@ spec: nullable: true properties: compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' enum: - none - passive @@ -6376,13 +6562,11 @@ spec: description: The algorithm for erasure coding type: string codingChunks: - description: 
Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 + description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). This is the number of OSDs that can be lost simultaneously before data cannot be recovered. minimum: 0 type: integer dataChunks: - description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 + description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). The number of chunks required to recover an object when any single OSD is lost is the same as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery. minimum: 0 type: integer required: @@ -6401,6 +6585,16 @@ spec: mode: description: 'Mode is the mirroring mode: either pool or image' type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object snapshotSchedules: description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools items: @@ -6510,6 +6704,10 @@ spec: nullable: true type: object x-kubernetes-preserve-unknown-fields: true + caBundleRef: + description: The name of the secret that stores custom ca-bundle with root and intermediate certificates. + nullable: true + type: string externalRgwEndpoints: description: ExternalRgwEndpoints points to external rgw endpoint(s) items: @@ -7267,14 +7465,109 @@ spec: type: integer type: object type: object + readinessProbe: + description: ProbeSpec is a wrapper around Probe so it can be enabled or disabled for a Ceph daemon + properties: + disabled: + description: Disabled determines whether probe is disable or not + type: boolean + probe: + description: Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + type: object type: object metadataPool: description: The metadata pool settings nullable: true properties: compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' enum: - none - passive @@ -7301,13 +7594,11 @@ spec: description: The algorithm for erasure coding type: string codingChunks: - description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 + description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). This is the number of OSDs that can be lost simultaneously before data cannot be recovered. minimum: 0 type: integer dataChunks: - description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 + description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). The number of chunks required to recover an object when any single OSD is lost is the same as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery. minimum: 0 type: integer required: @@ -7326,6 +7617,16 @@ spec: mode: description: 'Mode is the mirroring mode: either pool or image' type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object snapshotSchedules: description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools items: @@ -7558,9 +7859,76 @@ spec: spec: description: ObjectStoreUserSpec represent the spec of an Objectstoreuser properties: + capabilities: + description: Additional admin-level capabilities for the Ceph object store user + nullable: true + properties: + bucket: + description: Admin capabilities to read/write Ceph object store buckets. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + enum: + - '*' + - read + - write + - read, write + type: string + metadata: + description: Admin capabilities to read/write Ceph object store metadata. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + enum: + - '*' + - read + - write + - read, write + type: string + usage: + description: Admin capabilities to read/write Ceph object store usage. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + enum: + - '*' + - read + - write + - read, write + type: string + user: + description: Admin capabilities to read/write Ceph object store users. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + enum: + - '*' + - read + - write + - read, write + type: string + zone: + description: Admin capabilities to read/write Ceph object store zones. 
Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + enum: + - '*' + - read + - write + - read, write + type: string + type: object displayName: description: The display name for the ceph users type: string + quotas: + description: ObjectUserQuotaSpec can be used to set quotas for the object store user to limit their usage. See the [Ceph docs](https://docs.ceph.com/en/latest/radosgw/admin/?#quota-management) for more + nullable: true + properties: + maxBuckets: + description: Maximum bucket limit for the ceph user + nullable: true + type: integer + maxObjects: + description: Maximum number of objects across all the user's buckets + format: int64 + nullable: true + type: integer + maxSize: + anyOf: + - type: integer + - type: string + description: Maximum size limit of all objects across all the user's buckets See https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity for more info. + nullable: true + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object store: description: The store the user will be created in type: string @@ -7689,8 +8057,7 @@ spec: nullable: true properties: compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' enum: - none - passive @@ -7717,13 +8084,11 @@ spec: description: The algorithm for erasure coding type: string codingChunks: - description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 + description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). This is the number of OSDs that can be lost simultaneously before data cannot be recovered. minimum: 0 type: integer dataChunks: - description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 + description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). The number of chunks required to recover an object when any single OSD is lost is the same as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery. 
minimum: 0 type: integer required: @@ -7742,6 +8107,16 @@ spec: mode: description: 'Mode is the mirroring mode: either pool or image' type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object snapshotSchedules: description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools items: @@ -7845,8 +8220,7 @@ spec: nullable: true properties: compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' enum: - none - passive @@ -7873,13 +8247,11 @@ spec: description: The algorithm for erasure coding type: string codingChunks: - description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 + description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). This is the number of OSDs that can be lost simultaneously before data cannot be recovered. minimum: 0 type: integer dataChunks: - description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) - maximum: 9 + description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). The number of chunks required to recover an object when any single OSD is lost is the same as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery. minimum: 0 type: integer required: @@ -7898,6 +8270,16 @@ spec: mode: description: 'Mode is the mirroring mode: either pool or image' type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object snapshotSchedules: description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools items: @@ -8790,226 +9172,6 @@ spec: --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - creationTimestamp: null - name: volumereplicationclasses.replication.storage.openshift.io -spec: - group: replication.storage.openshift.io - names: - kind: VolumeReplicationClass - listKind: VolumeReplicationClassList - plural: volumereplicationclasses - shortNames: - - vrc - singular: volumereplicationclass - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .spec.provisioner - name: provisioner - type: string - name: v1alpha1 - schema: - openAPIV3Schema: - description: VolumeReplicationClass is the Schema for the volumereplicationclasses API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: VolumeReplicationClassSpec specifies parameters that an underlying storage system uses when creating a volume replica. A specific VolumeReplicationClass is used by specifying its name in a VolumeReplication object. - properties: - parameters: - additionalProperties: - type: string - description: Parameters is a key-value map with storage provisioner specific configurations for creating volume replicas - type: object - provisioner: - description: Provisioner is the name of storage provisioner - type: string - required: - - provisioner - type: object - status: - description: VolumeReplicationClassStatus defines the observed state of VolumeReplicationClass - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - creationTimestamp: null - name: volumereplications.replication.storage.openshift.io -spec: - group: replication.storage.openshift.io - names: - kind: VolumeReplication - listKind: VolumeReplicationList - plural: volumereplications - shortNames: - - vr - singular: volumereplication - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .spec.volumeReplicationClass - name: volumeReplicationClass - type: string - - jsonPath: .spec.dataSource.name - name: pvcName - type: string - - jsonPath: .spec.replicationState - name: desiredState - type: string - - jsonPath: .status.state - name: currentState - type: string - name: v1alpha1 - schema: - openAPIV3Schema: - description: VolumeReplication is the Schema for the volumereplications API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: VolumeReplicationSpec defines the desired state of VolumeReplication - properties: - dataSource: - description: DataSource represents the object associated with the volume - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. 
- type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - replicationState: - description: ReplicationState represents the replication operation to be performed on the volume. Supported operations are "primary", "secondary" and "resync" - enum: - - primary - - secondary - - resync - type: string - volumeReplicationClass: - description: VolumeReplicationClass is the VolumeReplicationClass name for this VolumeReplication resource - type: string - required: - - dataSource - - replicationState - - volumeReplicationClass - type: object - status: - description: VolumeReplicationStatus defines the observed state of VolumeReplication - properties: - conditions: - description: Conditions are the list of conditions and their status. - items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - lastCompletionTime: - format: date-time - type: string - lastStartTime: - format: date-time - type: string - message: - type: string - observedGeneration: - description: observedGeneration is the last generation change the operator has dealt with - format: int64 - type: integer - state: - description: State captures the latest state of the replication operation - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c diff --git a/cluster/examples/kubernetes/ceph/create-external-cluster-resources.py b/cluster/examples/kubernetes/ceph/create-external-cluster-resources.py index cc6a7fa6e64e..0650fd3dcdbb 100644 --- a/cluster/examples/kubernetes/ceph/create-external-cluster-resources.py +++ b/cluster/examples/kubernetes/ceph/create-external-cluster-resources.py @@ -80,10 +80,13 @@ def _init_cmd_output_map(self): self.cmd_names['fs ls'] = '''{"format": "json", "prefix": "fs ls"}''' self.cmd_names['quorum_status'] = '''{"format": "json", "prefix": "quorum_status"}''' self.cmd_names['caps_change_default_pool_prefix'] = '''{"caps": ["mon", "allow r, allow command quorum_status, allow command version", "mgr", "allow command config", "osd", "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth caps"}''' + self.cmd_names['mgr services'] = '''{"format": "json", "prefix": "mgr services"}''' # all the commands and their output self.cmd_output_map[self.cmd_names['fs ls'] ] = '''[{"name":"myfs","metadata_pool":"myfs-metadata","metadata_pool_id":2,"data_pool_ids":[3],"data_pools":["myfs-data0"]}]''' self.cmd_output_map[self.cmd_names['quorum_status']] = '''{"election_epoch":3,"quorum":[0],"quorum_names":["a"],"quorum_leader_name":"a","quorum_age":14385,"features":{"quorum_con":"4540138292836696063","quorum_mon":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus"]},"monmap":{"epoch":1,"fsid":"af4e1673-0b72-402d-990a-22d2919d0f1c","modified":"2020-05-07T03:36:39.918035Z","created":"2020-05-07T03:36:39.918035Z","min_mon_release":15,"min_mon_release_name":"octopus","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"10.110.205.174:3300","nonce":0},{"type":"v1","addr":"10.110.205.174:6789","nonce":0}]},"addr":"10.110.205.174:6789/0","public_addr":"10.110.205.174:6789/0","priority":0,"weight":0}]}}''' + self.cmd_output_map[self.cmd_names['mgr services'] + ] = '''{"dashboard":"https://ceph-dashboard:8443/","prometheus":"http://ceph-dashboard-db:9283/"}''' self.cmd_output_map['''{"caps": ["mon", "allow r, allow command quorum_status", "osd", "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x 
pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon":"allow r, allow command quorum_status","osd":"allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x pool=default.rgw.buckets.index"}}]''' self.cmd_output_map['''{"caps": ["mon", "profile rbd", "osd", "profile rbd"], "entity": "client.csi-rbd-node", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.csi-rbd-node","key":"AQBOgrNeHbK1AxAAubYBeV8S1U/GPzq5SVeq6g==","caps":{"mon":"profile rbd","osd":"profile rbd"}}]''' self.cmd_output_map['''{"caps": ["mon", "profile rbd", "mgr", "allow rw", "osd", "profile rbd"], "entity": "client.csi-rbd-provisioner", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.csi-rbd-provisioner","key":"AQBNgrNe1geyKxAA8ekViRdE+hss5OweYBkwNg==","caps":{"mgr":"allow rw","mon":"profile rbd","osd":"profile rbd"}}]''' @@ -367,9 +370,9 @@ def _convert_hostname_to_ip(self, host_name): def get_active_and_standby_mgrs(self): monitoring_endpoint_port = self._arg_parser.monitoring_endpoint_port - monitoring_endpoint_ip = self._arg_parser.monitoring_endpoint + monitoring_endpoint_ip_list = self._arg_parser.monitoring_endpoint standby_mgrs = [] - if not monitoring_endpoint_ip: + if not monitoring_endpoint_ip_list: cmd_json = {"prefix": "status", "format": "json"} ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json) # if there is an unsuccessful attempt, @@ -392,7 +395,7 @@ def get_active_and_standby_mgrs(self): except ValueError: raise ExecutionFailureException( "invalid endpoint: {}".format(monitoring_endpoint)) - monitoring_endpoint_ip = parsed_endpoint.hostname + monitoring_endpoint_ip_list = parsed_endpoint.hostname if not monitoring_endpoint_port: monitoring_endpoint_port = "{}".format(parsed_endpoint.port) @@ -400,6 +403,18 @@ def get_active_and_standby_mgrs(self): if not monitoring_endpoint_port: monitoring_endpoint_port = self.DEFAULT_MONITORING_ENDPOINT_PORT + # user could give comma and space separated inputs (like --monitoring-endpoint=", ") + monitoring_endpoint_ip_list = monitoring_endpoint_ip_list.replace( + ",", " ") + monitoring_endpoint_ip_list_split = monitoring_endpoint_ip_list.split() + # if monitoring-endpoint could not be found, raise an error + if len(monitoring_endpoint_ip_list_split) == 0: + raise ExecutionFailureException("No 'monitoring-endpoint' found") + # first ip is treated as the main monitoring-endpoint + monitoring_endpoint_ip = monitoring_endpoint_ip_list_split[0] + # rest of the ip-s are added to the 'standby_mgrs' list + standby_mgrs.extend(monitoring_endpoint_ip_list_split[1:]) + try: failed_ip = monitoring_endpoint_ip monitoring_endpoint_ip = self._convert_hostname_to_ip( @@ -586,28 +601,38 @@ def create_checkerKey(self): "Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST)) return str(json_out[0]['key']) + def get_ceph_dashboard_link(self): + cmd_json = {"prefix": "mgr services", "format": "json"} + ret_val, json_out, _ = self._common_cmd_json_gen(cmd_json) + # if there is an unsuccessful attempt, + if ret_val != 0 or len(json_out) == 0: + return None + if not 'dashboard' in json_out: + return None + return json_out['dashboard'] + def create_rgw_admin_ops_user(self): cmd = ['radosgw-admin', 'user', 'create', '--uid', self.EXTERNAL_RGW_ADMIN_OPS_USER_NAME, '--display-name', 'Rook RGW Admin Ops 
user', '--caps', 'buckets=*;users=*;usage=read;metadata=read;zone=read'] try: output = subprocess.check_output(cmd, stderr=subprocess.PIPE) - except subprocess.CalledProcessError as exec: + except subprocess.CalledProcessError as execErr: # if the user already exists, we just query it - if exec.returncode == errno.EEXIST: + if execErr.returncode == errno.EEXIST: cmd = ['radosgw-admin', 'user', 'info', '--uid', self.EXTERNAL_RGW_ADMIN_OPS_USER_NAME ] try: output = subprocess.check_output(cmd, stderr=subprocess.PIPE) - except subprocess.CalledProcessError as exec: + except subprocess.CalledProcessError as execErr: err_msg = "failed to execute command %s. Output: %s. Code: %s. Error: %s" % ( - cmd, exec.output, exec.returncode, exec.stderr) + cmd, execErr.output, execErr.returncode, execErr.stderr) raise Exception(err_msg) else: err_msg = "failed to execute command %s. Output: %s. Code: %s. Error: %s" % ( - cmd, exec.output, exec.returncode, exec.stderr) + cmd, execErr.output, execErr.returncode, execErr.stderr) raise Exception(err_msg) jsonoutput = json.loads(output) @@ -642,6 +667,7 @@ def _gen_output_map(self): self.out_map['ROOK_EXTERNAL_USERNAME'] = self.run_as_user self.out_map['ROOK_EXTERNAL_CEPH_MON_DATA'] = self.get_ceph_external_mon_data() self.out_map['ROOK_EXTERNAL_USER_SECRET'] = self.create_checkerKey() + self.out_map['ROOK_EXTERNAL_DASHBOARD_LINK'] = self.get_ceph_dashboard_link() self.out_map['CSI_RBD_NODE_SECRET_SECRET'] = self.create_cephCSIKeyring_RBDNode() self.out_map['CSI_RBD_PROVISIONER_SECRET'] = self.create_cephCSIKeyring_RBDProvisioner() self.out_map['CEPHFS_POOL_NAME'] = self._arg_parser.cephfs_data_pool_name @@ -728,6 +754,16 @@ def gen_json_out(self): } ] + # if 'ROOK_EXTERNAL_DASHBOARD_LINK' exists, then only add 'rook-ceph-dashboard-link' Secret + if self.out_map['ROOK_EXTERNAL_DASHBOARD_LINK']: + json_out.append({ + "name": "rook-ceph-dashboard-link", + "kind": "Secret", + "data": { + "userID": 'ceph-dashboard-link', + "userKey": self.out_map['ROOK_EXTERNAL_DASHBOARD_LINK'] + } + }) # if 'CSI_RBD_PROVISIONER_SECRET' exists, then only add 'rook-csi-rbd-provisioner' Secret if self.out_map['CSI_RBD_PROVISIONER_SECRET']: json_out.append({ diff --git a/cluster/examples/kubernetes/ceph/csi/cephfs/pod-ephemeral.yaml b/cluster/examples/kubernetes/ceph/csi/cephfs/pod-ephemeral.yaml new file mode 100644 index 000000000000..d5035e792ff0 --- /dev/null +++ b/cluster/examples/kubernetes/ceph/csi/cephfs/pod-ephemeral.yaml @@ -0,0 +1,21 @@ +kind: Pod +apiVersion: v1 +metadata: + name: csi-cephfs-demo-ephemeral-pod +spec: + containers: + - name: web-server + image: docker.io/library/nginx:latest + volumeMounts: + - mountPath: "/myspace" + name: mypvc + volumes: + - name: mypvc + ephemeral: + volumeClaimTemplate: + spec: + accessModes: ["ReadWriteMany"] + storageClassName: "rook-cephfs" + resources: + requests: + storage: 1Gi diff --git a/cluster/examples/kubernetes/ceph/csi/cephfs/storageclass-ec.yaml b/cluster/examples/kubernetes/ceph/csi/cephfs/storageclass-ec.yaml index 92f1ca8b562e..6c792812921a 100644 --- a/cluster/examples/kubernetes/ceph/csi/cephfs/storageclass-ec.yaml +++ b/cluster/examples/kubernetes/ceph/csi/cephfs/storageclass-ec.yaml @@ -2,9 +2,11 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: rook-cephfs +# Change "rook-ceph" provisioner prefix to match the operator namespace if needed provisioner: rook-ceph.cephfs.csi.ceph.com # driver:namespace:operator parameters: - # clusterID is the namespace where operator is deployed. 
+ # clusterID is the namespace where the rook cluster is running + # If you change this namespace, also change the namespace below where the secret namespaces are defined clusterID: rook-ceph # namespace:cluster # CephFS filesystem name into which the volume shall be created diff --git a/cluster/examples/kubernetes/ceph/csi/cephfs/storageclass.yaml b/cluster/examples/kubernetes/ceph/csi/cephfs/storageclass.yaml index fc8169b643dc..9b7c0ac7e62f 100644 --- a/cluster/examples/kubernetes/ceph/csi/cephfs/storageclass.yaml +++ b/cluster/examples/kubernetes/ceph/csi/cephfs/storageclass.yaml @@ -2,9 +2,11 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: rook-cephfs +# Change "rook-ceph" provisioner prefix to match the operator namespace if needed provisioner: rook-ceph.cephfs.csi.ceph.com # driver:namespace:operator parameters: - # clusterID is the namespace where operator is deployed. + # clusterID is the namespace where the rook cluster is running + # If you change this namespace, also change the namespace below where the secret namespaces are defined clusterID: rook-ceph # namespace:cluster # CephFS filesystem name into which the volume shall be created diff --git a/cluster/examples/kubernetes/ceph/csi/rbd/pod-ephemeral.yaml b/cluster/examples/kubernetes/ceph/csi/rbd/pod-ephemeral.yaml new file mode 100644 index 000000000000..bd752470b76c --- /dev/null +++ b/cluster/examples/kubernetes/ceph/csi/rbd/pod-ephemeral.yaml @@ -0,0 +1,21 @@ +kind: Pod +apiVersion: v1 +metadata: + name: csi-rbd-demo-ephemeral-pod +spec: + containers: + - name: web-server + image: docker.io/library/nginx:latest + volumeMounts: + - mountPath: "/myspace" + name: mypvc + volumes: + - name: mypvc + ephemeral: + volumeClaimTemplate: + spec: + accessModes: ["ReadWriteOnce"] + storageClassName: "rook-ceph-block" + resources: + requests: + storage: 1Gi diff --git a/cluster/examples/kubernetes/ceph/csi/rbd/storageclass-ec.yaml b/cluster/examples/kubernetes/ceph/csi/rbd/storageclass-ec.yaml index f49f2fa89c91..c62507ffc5f9 100644 --- a/cluster/examples/kubernetes/ceph/csi/rbd/storageclass-ec.yaml +++ b/cluster/examples/kubernetes/ceph/csi/rbd/storageclass-ec.yaml @@ -77,9 +77,9 @@ parameters: # will set default as `ext4`. csi.storage.k8s.io/fstype: ext4 # uncomment the following to use rbd-nbd as mounter on supported nodes -# **IMPORTANT**: If you are using rbd-nbd as the mounter, during upgrade you will be hit a ceph-csi -# issue that causes the mount to be disconnected. You will need to follow special upgrade steps -# to restart your application pods. Therefore, this option is not recommended. +# **IMPORTANT**: CephCSI v3.4.0 onwards a volume healer functionality is added to reattach +# the PVC to application pod if nodeplugin pod restart. +# Its still in Alpha support. Therefore, this option is not recommended for production use. #mounter: rbd-nbd allowVolumeExpansion: true reclaimPolicy: Delete diff --git a/cluster/examples/kubernetes/ceph/csi/rbd/storageclass.yaml b/cluster/examples/kubernetes/ceph/csi/rbd/storageclass.yaml index 8077139fcb1c..98ef451f7dd7 100644 --- a/cluster/examples/kubernetes/ceph/csi/rbd/storageclass.yaml +++ b/cluster/examples/kubernetes/ceph/csi/rbd/storageclass.yaml @@ -66,9 +66,9 @@ parameters: # in hyperconverged settings where the volume is mounted on the same node as the osds. 
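The two pod-ephemeral.yaml examples added above rely on Kubernetes generic ephemeral volumes: the inline volumeClaimTemplate is turned into a PVC whose lifetime is tied to the pod. A short usage sketch, assuming the rook-cephfs and rook-ceph-block StorageClasses from the neighbouring examples already exist and the cluster supports generic ephemeral volumes:

# CephFS variant (RWX); the RBD variant under csi/rbd/ works the same way.
kubectl create -f cluster/examples/kubernetes/ceph/csi/cephfs/pod-ephemeral.yaml
# The generated PVC is named <pod name>-<volume name>:
kubectl get pvc csi-cephfs-demo-ephemeral-pod-mypvc
# Deleting the pod also removes the ephemeral PVC.
kubectl delete pod csi-cephfs-demo-ephemeral-pod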
csi.storage.k8s.io/fstype: ext4 # uncomment the following to use rbd-nbd as mounter on supported nodes -# **IMPORTANT**: If you are using rbd-nbd as the mounter, during upgrade you will be hit a ceph-csi -# issue that causes the mount to be disconnected. You will need to follow special upgrade steps -# to restart your application pods. Therefore, this option is not recommended. +# **IMPORTANT**: CephCSI v3.4.0 onwards a volume healer functionality is added to reattach +# the PVC to application pod if nodeplugin pod restart. +# Its still in Alpha support. Therefore, this option is not recommended for production use. #mounter: rbd-nbd allowVolumeExpansion: true reclaimPolicy: Delete diff --git a/cluster/examples/kubernetes/ceph/csi/template/cephfs/csi-cephfsplugin-provisioner-dep.yaml b/cluster/examples/kubernetes/ceph/csi/template/cephfs/csi-cephfsplugin-provisioner-dep.yaml index 5d8541d7c3f1..c8d7c2a37561 100644 --- a/cluster/examples/kubernetes/ceph/csi/template/cephfs/csi-cephfsplugin-provisioner-dep.yaml +++ b/cluster/examples/kubernetes/ceph/csi/template/cephfs/csi-cephfsplugin-provisioner-dep.yaml @@ -17,7 +17,7 @@ spec: {{ $key }}: "{{ $value }}" {{ end }} spec: - serviceAccount: rook-csi-cephfs-provisioner-sa + serviceAccountName: rook-csi-cephfs-provisioner-sa {{ if .ProvisionerPriorityClassName }} priorityClassName: {{ .ProvisionerPriorityClassName }} {{ end }} @@ -34,11 +34,6 @@ spec: - name: ADDRESS value: /csi/csi-provisioner.sock imagePullPolicy: "IfNotPresent" - securityContext: - privileged: true - capabilities: - add: ["SYS_ADMIN"] - allowPrivilegeEscalation: true volumeMounts: - name: socket-dir mountPath: /csi @@ -55,11 +50,6 @@ spec: - name: ADDRESS value: unix:///csi/csi-provisioner.sock imagePullPolicy: "IfNotPresent" - securityContext: - privileged: true - capabilities: - add: ["SYS_ADMIN"] - allowPrivilegeEscalation: true volumeMounts: - name: socket-dir mountPath: /csi @@ -77,11 +67,6 @@ spec: - name: ADDRESS value: unix:///csi/csi-provisioner.sock imagePullPolicy: "IfNotPresent" - securityContext: - privileged: true - capabilities: - add: ["SYS_ADMIN"] - allowPrivilegeEscalation: true volumeMounts: - name: socket-dir mountPath: /csi @@ -98,11 +83,6 @@ spec: - name: ADDRESS value: unix:///csi/csi-provisioner.sock imagePullPolicy: "IfNotPresent" - securityContext: - privileged: true - capabilities: - add: ["SYS_ADMIN"] - allowPrivilegeEscalation: true volumeMounts: - name: socket-dir mountPath: /csi @@ -136,11 +116,6 @@ spec: - name: CSI_ENDPOINT value: unix:///csi/csi-provisioner.sock imagePullPolicy: "IfNotPresent" - securityContext: - privileged: true - capabilities: - add: ["SYS_ADMIN"] - allowPrivilegeEscalation: true volumeMounts: - name: socket-dir mountPath: /csi @@ -175,11 +150,6 @@ spec: - name: socket-dir mountPath: /csi imagePullPolicy: "IfNotPresent" - securityContext: - privileged: true - capabilities: - add: ["SYS_ADMIN"] - allowPrivilegeEscalation: true volumes: - name: socket-dir emptyDir: { diff --git a/cluster/examples/kubernetes/ceph/csi/template/cephfs/csi-cephfsplugin.yaml b/cluster/examples/kubernetes/ceph/csi/template/cephfs/csi-cephfsplugin.yaml index 6d1d811fda88..31251daae52c 100644 --- a/cluster/examples/kubernetes/ceph/csi/template/cephfs/csi-cephfsplugin.yaml +++ b/cluster/examples/kubernetes/ceph/csi/template/cephfs/csi-cephfsplugin.yaml @@ -18,7 +18,7 @@ spec: {{ $key }}: "{{ $value }}" {{ end }} spec: - serviceAccount: rook-csi-cephfs-plugin-sa + serviceAccountName: rook-csi-cephfs-plugin-sa hostNetwork: {{ .EnableCSIHostNetwork }} 
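With the securityContext blocks dropped above, the CephFS provisioner sidecars no longer run privileged. A quick check on a live cluster, assuming the deployment keeps its usual csi-cephfsplugin-provisioner name:

# List any securityContext still set per container; no "privileged":true entries are expected.
kubectl -n rook-ceph get deploy csi-cephfsplugin-provisioner \
  -o jsonpath='{range .spec.template.spec.containers[*]}{.name}{": "}{.securityContext}{"\n"}{end}'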
{{ if .PluginPriorityClassName }} priorityClassName: {{ .PluginPriorityClassName }} diff --git a/cluster/examples/kubernetes/ceph/csi/template/rbd/csi-rbdplugin-provisioner-dep.yaml b/cluster/examples/kubernetes/ceph/csi/template/rbd/csi-rbdplugin-provisioner-dep.yaml index df35b8f1b83b..8ac752b3da40 100644 --- a/cluster/examples/kubernetes/ceph/csi/template/rbd/csi-rbdplugin-provisioner-dep.yaml +++ b/cluster/examples/kubernetes/ceph/csi/template/rbd/csi-rbdplugin-provisioner-dep.yaml @@ -17,7 +17,7 @@ spec: {{ $key }}: "{{ $value }}" {{ end }} spec: - serviceAccount: rook-csi-rbd-provisioner-sa + serviceAccountName: rook-csi-rbd-provisioner-sa {{ if .ProvisionerPriorityClassName }} priorityClassName: {{ .ProvisionerPriorityClassName }} {{ end }} @@ -103,7 +103,7 @@ spec: fieldPath: metadata.namespace imagePullPolicy: "IfNotPresent" volumeMounts: - - name: ceph-csi-config + - name: ceph-csi-configs mountPath: /etc/ceph-csi-config/ - name: keys-tmp-dir mountPath: /tmp/csi/keys @@ -166,7 +166,7 @@ spec: - mountPath: /lib/modules name: lib-modules readOnly: true - - name: ceph-csi-config + - name: ceph-csi-configs mountPath: /etc/ceph-csi-config/ - name: keys-tmp-dir mountPath: /tmp/csi/keys @@ -204,12 +204,21 @@ spec: emptyDir: { medium: "Memory" } - - name: ceph-csi-config - configMap: - name: rook-ceph-csi-config - items: - - key: csi-cluster-config-json - path: config.json + - name: ceph-csi-configs + projected: + sources: + - name: ceph-csi-config + configMap: + name: rook-ceph-csi-config + items: + - key: csi-cluster-config-json + path: config.json + - name: ceph-csi-mapping-config + configMap: + name: rook-ceph-csi-mapping-config + items: + - key: csi-mapping-config-json + path: cluster-mapping.json - name: keys-tmp-dir emptyDir: { medium: "Memory" diff --git a/cluster/examples/kubernetes/ceph/csi/template/rbd/csi-rbdplugin.yaml b/cluster/examples/kubernetes/ceph/csi/template/rbd/csi-rbdplugin.yaml index 1d1c9bfe10a6..fe79929f221a 100644 --- a/cluster/examples/kubernetes/ceph/csi/template/rbd/csi-rbdplugin.yaml +++ b/cluster/examples/kubernetes/ceph/csi/template/rbd/csi-rbdplugin.yaml @@ -18,7 +18,7 @@ spec: {{ $key }}: "{{ $value }}" {{ end }} spec: - serviceAccount: rook-csi-rbd-plugin-sa + serviceAccountName: rook-csi-rbd-plugin-sa {{ if .PluginPriorityClassName }} priorityClassName: {{ .PluginPriorityClassName }} {{ end }} @@ -67,6 +67,7 @@ spec: - "--metricsport={{ .RBDGRPCMetricsPort }}" - "--metricspath=/metrics" - "--enablegrpcmetrics={{ .EnableCSIGRPCMetrics }}" + - "--stagingpath={{ .KubeletDirPath }}/plugins/kubernetes.io/csi/pv/" env: - name: POD_IP valueFrom: @@ -99,7 +100,7 @@ spec: - mountPath: /lib/modules name: lib-modules readOnly: true - - name: ceph-csi-config + - name: ceph-csi-configs mountPath: /etc/ceph-csi-config/ - name: keys-tmp-dir mountPath: /tmp/csi/keys @@ -153,12 +154,21 @@ spec: - name: lib-modules hostPath: path: /lib/modules - - name: ceph-csi-config - configMap: - name: rook-ceph-csi-config - items: - - key: csi-cluster-config-json - path: config.json + - name: ceph-csi-configs + projected: + sources: + - name: ceph-csi-config + configMap: + name: rook-ceph-csi-config + items: + - key: csi-cluster-config-json + path: config.json + - name: ceph-csi-mapping-config + configMap: + name: rook-ceph-csi-mapping-config + items: + - key: csi-mapping-config-json + path: cluster-mapping.json - name: keys-tmp-dir emptyDir: { medium: "Memory" diff --git a/cluster/examples/kubernetes/ceph/direct-mount.yaml b/cluster/examples/kubernetes/ceph/direct-mount.yaml 
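The RBD provisioner and plugin pods now mount a projected volume, ceph-csi-configs, that merges two ConfigMaps into /etc/ceph-csi-config/: the existing rook-ceph-csi-config (config.json) and the new rook-ceph-csi-mapping-config (cluster-mapping.json). A small sanity check, assuming the operator has created both ConfigMaps and the daemonset and container keep their usual csi-rbdplugin names:

kubectl -n rook-ceph get cm rook-ceph-csi-config rook-ceph-csi-mapping-config
# Both projected files should be visible inside a plugin pod:
kubectl -n rook-ceph exec ds/csi-rbdplugin -c csi-rbdplugin -- ls /etc/ceph-csi-config/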
index ad0af7779ef8..64a466b8ea8d 100644 --- a/cluster/examples/kubernetes/ceph/direct-mount.yaml +++ b/cluster/examples/kubernetes/ceph/direct-mount.yaml @@ -18,7 +18,7 @@ spec: dnsPolicy: ClusterFirstWithHostNet containers: - name: rook-direct-mount - image: rook/ceph:master + image: rook/ceph:v1.7.8 command: ["/tini"] args: ["-g", "--", "/usr/local/bin/toolbox.sh"] imagePullPolicy: IfNotPresent diff --git a/cluster/examples/kubernetes/ceph/filesystem.yaml b/cluster/examples/kubernetes/ceph/filesystem.yaml index 89746aac374d..eedd7181d8d9 100644 --- a/cluster/examples/kubernetes/ceph/filesystem.yaml +++ b/cluster/examples/kubernetes/ceph/filesystem.yaml @@ -17,7 +17,7 @@ spec: requireSafeReplicaSize: true parameters: # Inline compression mode for the data pool - # Further reference: https://docs.ceph.com/docs/nautilus/rados/configuration/bluestore-config-ref/#inline-compression + # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression compression_mode: none # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool @@ -33,7 +33,7 @@ spec: requireSafeReplicaSize: true parameters: # Inline compression mode for the data pool - # Further reference: https://docs.ceph.com/docs/nautilus/rados/configuration/bluestore-config-ref/#inline-compression + # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression compression_mode: none # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool diff --git a/cluster/examples/kubernetes/ceph/images.txt b/cluster/examples/kubernetes/ceph/images.txt new file mode 100644 index 000000000000..0a54ce1a6225 --- /dev/null +++ b/cluster/examples/kubernetes/ceph/images.txt @@ -0,0 +1,9 @@ + k8s.gcr.io/sig-storage/csi-attacher:v3.3.0 + k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0 + k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0 + k8s.gcr.io/sig-storage/csi-resizer:v1.3.0 + k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0 + quay.io/ceph/ceph:v16.2.6 + quay.io/cephcsi/cephcsi:v3.4.0 + quay.io/csiaddons/volumereplication-operator:v0.1.0 + rook/ceph:v1.7.8 diff --git a/cluster/examples/kubernetes/ceph/monitoring/keda-rgw.yaml b/cluster/examples/kubernetes/ceph/monitoring/keda-rgw.yaml new file mode 100644 index 000000000000..de24eea2847b --- /dev/null +++ b/cluster/examples/kubernetes/ceph/monitoring/keda-rgw.yaml @@ -0,0 +1,19 @@ +apiVersion: keda.k8s.io/v1alpha1 +kind: ScaledObject +metadata: + name: rgw-scale + namespace: rook-ceph +spec: + scaleTargetRef: + kind: Deployment + deploymentName: rook-ceph-rgw-my-store-a + minReplicaCount: 1 + maxReplicaCount: 5 + triggers: + - type: prometheus + metadata: + serverAddress: http://rook-prometheus.rook-ceph.svc:9090 + metricName: ceph_rgw_put_collector + query: | + sum(rate(ceph_rgw_put[2m])) + threshold: "90" diff --git a/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v14-rules-external.yaml b/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v14-rules-external.yaml index cf8488efb631..5c1e5df3e872 100644 --- a/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v14-rules-external.yaml +++ b/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v14-rules-external.yaml @@ -12,10 +12,8 @@ spec: rules: - alert: PersistentVolumeUsageNearFull annotations: - description: PVC {{ $labels.persistentvolumeclaim }} utilization has crossed - 75%. Free up some space. 
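The keda-rgw.yaml example above scales the RGW deployment on the rate of ceph_rgw_put reported by the bundled Prometheus. A usage sketch, assuming KEDA (which provides the keda.k8s.io/v1alpha1 ScaledObject CRD) and the monitoring stack are installed, and that the object store actually creates a rook-ceph-rgw-my-store-a deployment:

kubectl create -f cluster/examples/kubernetes/ceph/monitoring/keda-rgw.yaml
kubectl -n rook-ceph get scaledobject rgw-scale
# KEDA drives the replica count through an HPA it creates for the target deployment:
kubectl -n rook-ceph get hpa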
- message: PVC {{ $labels.persistentvolumeclaim }} is nearing full. Data deletion - is required. + description: PVC {{ $labels.persistentvolumeclaim }} utilization has crossed 75%. Free up some space or expand the PVC. + message: PVC {{ $labels.persistentvolumeclaim }} is nearing full. Data deletion or PVC expansion is required. severity_level: warning storage_type: ceph expr: | @@ -25,10 +23,8 @@ spec: severity: warning - alert: PersistentVolumeUsageCritical annotations: - description: PVC {{ $labels.persistentvolumeclaim }} utilization has crossed - 85%. Free up some space immediately. - message: PVC {{ $labels.persistentvolumeclaim }} is critically full. Data - deletion is required. + description: PVC {{ $labels.persistentvolumeclaim }} utilization has crossed 85%. Free up some space or expand the PVC immediately. + message: PVC {{ $labels.persistentvolumeclaim }} is critically full. Data deletion or PVC expansion is required. severity_level: error storage_type: ceph expr: | @@ -36,3 +32,4 @@ spec: for: 5s labels: severity: critical + diff --git a/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v14-rules.yaml b/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v14-rules.yaml index 0538c572de26..16530fb9382a 100644 --- a/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v14-rules.yaml +++ b/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v14-rules.yaml @@ -11,7 +11,7 @@ spec: - name: ceph.rules rules: - expr: | - kube_node_status_condition{condition="Ready",job="kube-state-metrics",status="true"} * on (node) group_right() max(label_replace(ceph_disk_occupation{job="rook-ceph-mgr"},"node","$1","exported_instance","(.*)")) by (node) + kube_node_status_condition{condition="Ready",job="kube-state-metrics",status="true"} * on (node) group_right() max(label_replace(ceph_disk_occupation{job="rook-ceph-mgr"},"node","$1","exported_instance","(.*)")) by (node, namespace) record: cluster:ceph_node_down:join_kube - expr: | avg(topk by (ceph_daemon) (1, label_replace(label_replace(ceph_disk_occupation{job="rook-ceph-mgr"}, "instance", "$1", "exported_instance", "(.*)"), "device", "$1", "device", "/dev/(.*)")) * on(instance, device) group_right(ceph_daemon) topk by (instance,device) (1,(irate(node_disk_read_time_seconds_total[1m]) + irate(node_disk_write_time_seconds_total[1m]) / (clamp_min(irate(node_disk_reads_completed_total[1m]), 1) + irate(node_disk_writes_completed_total[1m]))))) @@ -42,7 +42,7 @@ spec: severity_level: critical storage_type: ceph expr: | - absent(up{job="rook-ceph-mgr"} == 1) + label_replace((up{job="rook-ceph-mgr"} == 0 or absent(up{job="rook-ceph-mgr"})), "namespace", "openshift-storage", "", "") for: 5m labels: severity: critical @@ -53,7 +53,7 @@ spec: severity_level: warning storage_type: ceph expr: | - sum(up{job="rook-ceph-mgr"}) < 1 + sum(kube_deployment_spec_replicas{deployment=~"rook-ceph-mgr-.*"}) by (namespace) < 1 for: 5m labels: severity: warning @@ -61,13 +61,12 @@ spec: rules: - alert: CephMdsMissingReplicas annotations: - description: Minimum required replicas for storage metadata service not available. - Might affect the working of storage cluster. + description: Minimum required replicas for storage metadata service not available. Might affect the working of storage cluster. message: Insufficient replicas for storage metadata service. 
severity_level: warning storage_type: ceph expr: | - sum(ceph_mds_metadata{job="rook-ceph-mgr"} == 1) < 2 + sum(ceph_mds_metadata{job="rook-ceph-mgr"} == 1) by (namespace) < 2 for: 5m labels: severity: warning @@ -80,14 +79,24 @@ spec: severity_level: error storage_type: ceph expr: | - count(ceph_mon_quorum_status{job="rook-ceph-mgr"} == 1) <= ((count(ceph_mon_metadata{job="rook-ceph-mgr"}) % 2) + 1) + count(ceph_mon_quorum_status{job="rook-ceph-mgr"} == 1) by (namespace) <= (floor(count(ceph_mon_metadata{job="rook-ceph-mgr"}) by (namespace) / 2) + 1) for: 15m labels: severity: critical + - alert: CephMonQuorumLost + annotations: + description: Storage cluster quorum is lost. Contact Support. + message: Storage quorum is lost + severity_level: critical + storage_type: ceph + expr: | + count(kube_pod_status_phase{pod=~"rook-ceph-mon-.*", phase=~"Running|running"} == 1) by (namespace) < 2 + for: 5m + labels: + severity: critical - alert: CephMonHighNumberOfLeaderChanges annotations: - description: Ceph Monitor {{ $labels.ceph_daemon }} on host {{ $labels.hostname - }} has seen {{ $value | printf "%.2f" }} leader changes per minute recently. + description: Ceph Monitor {{ $labels.ceph_daemon }} on host {{ $labels.hostname }} has seen {{ $value | printf "%.2f" }} leader changes per minute recently. message: Storage Cluster has seen many leader changes recently. severity_level: warning storage_type: ceph @@ -100,8 +109,7 @@ spec: rules: - alert: CephNodeDown annotations: - description: Storage node {{ $labels.node }} went down. Please check the node - immediately. + description: Storage node {{ $labels.node }} went down. Please check the node immediately. message: Storage node {{ $labels.node }} went down severity_level: error storage_type: ceph @@ -114,22 +122,18 @@ spec: rules: - alert: CephOSDCriticallyFull annotations: - description: Utilization of storage device {{ $labels.ceph_daemon }} of device_class - type {{$labels.device_class}} has crossed 80% on host {{ $labels.hostname - }}. Immediately free up some space or add capacity of type {{$labels.device_class}}. + description: Utilization of storage device {{ $labels.ceph_daemon }} of device_class type {{$labels.device_class}} has crossed 80% on host {{ $labels.hostname }}. Immediately free up some space or add capacity of type {{$labels.device_class}}. message: Back-end storage device is critically full. severity_level: error storage_type: ceph expr: | - (ceph_osd_metadata * on (ceph_daemon) group_right(device_class) (ceph_osd_stat_bytes_used / ceph_osd_stat_bytes)) >= 0.80 + (ceph_osd_metadata * on (ceph_daemon) group_right(device_class,hostname) (ceph_osd_stat_bytes_used / ceph_osd_stat_bytes)) >= 0.80 for: 40s labels: severity: critical - alert: CephOSDFlapping annotations: - description: Storage daemon {{ $labels.ceph_daemon }} has restarted 5 times - in last 5 minutes. Please check the pod events or ceph status to find out - the cause. + description: Storage daemon {{ $labels.ceph_daemon }} has restarted 5 times in last 5 minutes. Please check the pod events or ceph status to find out the cause. message: Ceph storage osd flapping. severity_level: error storage_type: ceph @@ -140,33 +144,29 @@ spec: severity: critical - alert: CephOSDNearFull annotations: - description: Utilization of storage device {{ $labels.ceph_daemon }} of device_class - type {{$labels.device_class}} has crossed 75% on host {{ $labels.hostname - }}. Immediately free up some space or add capacity of type {{$labels.device_class}}. 
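The reworked quorum expression above fires when the number of mons in quorum drops to the bare majority or below: with 3 mons the threshold is floor(3/2)+1 = 2, and with 5 mons it is floor(5/2)+1 = 3. The new CephMonQuorumLost alert additionally fires when fewer than two rook-ceph-mon pods are running at all. The live quorum can be checked from the toolbox, assuming its deployment is named rook-ceph-tools:

kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph quorum_status -f json | jq '.quorum_names'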
+ description: Utilization of storage device {{ $labels.ceph_daemon }} of device_class type {{$labels.device_class}} has crossed 75% on host {{ $labels.hostname }}. Immediately free up some space or add capacity of type {{$labels.device_class}}. message: Back-end storage device is nearing full. severity_level: warning storage_type: ceph expr: | - (ceph_osd_metadata * on (ceph_daemon) group_right(device_class) (ceph_osd_stat_bytes_used / ceph_osd_stat_bytes)) >= 0.75 + (ceph_osd_metadata * on (ceph_daemon) group_right(device_class,hostname) (ceph_osd_stat_bytes_used / ceph_osd_stat_bytes)) >= 0.75 for: 40s labels: severity: warning - alert: CephOSDDiskNotResponding annotations: - description: Disk device {{ $labels.device }} not responding, on host {{ $labels.host - }}. + description: Disk device {{ $labels.device }} not responding, on host {{ $labels.host }}. message: Disk not responding severity_level: error storage_type: ceph expr: | label_replace((ceph_osd_in == 1 and ceph_osd_up == 0),"disk","$1","ceph_daemon","osd.(.*)") + on(ceph_daemon) group_left(host, device) label_replace(ceph_disk_occupation,"host","$1","exported_instance","(.*)") - for: 1m + for: 15m labels: severity: critical - alert: CephOSDDiskUnavailable annotations: - description: Disk device {{ $labels.device }} not accessible on host {{ $labels.host - }}. + description: Disk device {{ $labels.device }} not accessible on host {{ $labels.host }}. message: Disk not accessible severity_level: error storage_type: ceph @@ -177,8 +177,7 @@ spec: severity: critical - alert: CephOSDSlowOps annotations: - description: '{{ $value }} Ceph OSD requests are taking too long to process. - Please check ceph status to find out the cause.' + description: '{{ $value }} Ceph OSD requests are taking too long to process. Please check ceph status to find out the cause.' message: OSD requests are taking too long to process. severity_level: warning storage_type: ceph @@ -213,10 +212,8 @@ spec: rules: - alert: PersistentVolumeUsageNearFull annotations: - description: PVC {{ $labels.persistentvolumeclaim }} utilization has crossed - 75%. Free up some space or expand the PVC. - message: PVC {{ $labels.persistentvolumeclaim }} is nearing full. Data deletion - or PVC expansion is required. + description: PVC {{ $labels.persistentvolumeclaim }} utilization has crossed 75%. Free up some space or expand the PVC. + message: PVC {{ $labels.persistentvolumeclaim }} is nearing full. Data deletion or PVC expansion is required. severity_level: warning storage_type: ceph expr: | @@ -226,10 +223,8 @@ spec: severity: warning - alert: PersistentVolumeUsageCritical annotations: - description: PVC {{ $labels.persistentvolumeclaim }} utilization has crossed - 85%. Free up some space or expand the PVC immediately. - message: PVC {{ $labels.persistentvolumeclaim }} is critically full. Data - deletion or PVC expansion is required. + description: PVC {{ $labels.persistentvolumeclaim }} utilization has crossed 85%. Free up some space or expand the PVC immediately. + message: PVC {{ $labels.persistentvolumeclaim }} is critically full. Data deletion or PVC expansion is required. severity_level: error storage_type: ceph expr: | @@ -258,30 +253,28 @@ spec: storage_type: ceph expr: | ceph_health_status{job="rook-ceph-mgr"} == 1 - for: 10m + for: 15m labels: severity: warning - alert: CephOSDVersionMismatch annotations: - description: There are {{ $value }} different versions of Ceph OSD components - running. 
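The OSD capacity alerts above keep the 75% (near full) and 80% (critically full) thresholds on ceph_osd_stat_bytes_used / ceph_osd_stat_bytes, and now carry device_class and hostname through the metric join. The same utilization can be read straight from Ceph when triaging, again assuming a rook-ceph-tools toolbox deployment:

# %USE and CLASS per OSD correspond to the ratio and device_class used by the alerts.
kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph osd df tree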
+ description: There are {{ $value }} different versions of Ceph OSD components running. message: There are multiple versions of storage services running. severity_level: warning storage_type: ceph expr: | - count(count(ceph_osd_metadata{job="rook-ceph-mgr"}) by (ceph_version)) > 1 + count(count(ceph_osd_metadata{job="rook-ceph-mgr"}) by (ceph_version, namespace)) by (ceph_version, namespace) > 1 for: 10m labels: severity: warning - alert: CephMonVersionMismatch annotations: - description: There are {{ $value }} different versions of Ceph Mon components - running. + description: There are {{ $value }} different versions of Ceph Mon components running. message: There are multiple versions of storage services running. severity_level: warning storage_type: ceph expr: | - count(count(ceph_mon_metadata{job="rook-ceph-mgr"}) by (ceph_version)) > 1 + count(count(ceph_mon_metadata{job="rook-ceph-mgr", ceph_version != ""}) by (ceph_version)) > 1 for: 10m labels: severity: warning @@ -289,10 +282,8 @@ spec: rules: - alert: CephClusterNearFull annotations: - description: Storage cluster utilization has crossed 75% and will become read-only - at 85%. Free up some space or expand the storage cluster. - message: Storage cluster is nearing full. Data deletion or cluster expansion - is required. + description: Storage cluster utilization has crossed 75% and will become read-only at 85%. Free up some space or expand the storage cluster. + message: Storage cluster is nearing full. Data deletion or cluster expansion is required. severity_level: warning storage_type: ceph expr: | @@ -302,10 +293,8 @@ spec: severity: warning - alert: CephClusterCriticallyFull annotations: - description: Storage cluster utilization has crossed 80% and will become read-only - at 85%. Free up some space or expand the storage cluster immediately. - message: Storage cluster is critically full and needs immediate data deletion - or cluster expansion. + description: Storage cluster utilization has crossed 80% and will become read-only at 85%. Free up some space or expand the storage cluster immediately. + message: Storage cluster is critically full and needs immediate data deletion or cluster expansion. severity_level: error storage_type: ceph expr: | @@ -315,10 +304,8 @@ spec: severity: critical - alert: CephClusterReadOnly annotations: - description: Storage cluster utilization has crossed 85% and will become read-only - now. Free up some space or expand the storage cluster immediately. - message: Storage cluster is read-only now and needs immediate data deletion - or cluster expansion. + description: Storage cluster utilization has crossed 85% and will become read-only now. Free up some space or expand the storage cluster immediately. + message: Storage cluster is read-only now and needs immediate data deletion or cluster expansion. 
severity_level: error storage_type: ceph expr: | @@ -349,4 +336,4 @@ spec: (ceph_pool_stored_raw * on (pool_id) group_left(name)ceph_pool_metadata) / ((ceph_pool_quota_bytes * on (pool_id) group_left(name)ceph_pool_metadata) > 0) > 0.90 for: 1m labels: - severity: critical \ No newline at end of file + severity: critical diff --git a/cluster/examples/kubernetes/ceph/nfs-test.yaml b/cluster/examples/kubernetes/ceph/nfs-test.yaml index 46770bdb62b6..d200997e7108 100644 --- a/cluster/examples/kubernetes/ceph/nfs-test.yaml +++ b/cluster/examples/kubernetes/ceph/nfs-test.yaml @@ -4,11 +4,12 @@ metadata: name: my-nfs namespace: rook-ceph # namespace:cluster spec: + # rados settings aren't necessary in Ceph Versions equal to or greater than Pacific 16.2.7 rados: - # RADOS pool where NFS client recovery data is stored. - # In this example the data pool for the "myfs" filesystem is used. - # If using the object store example, the data pool would be "my-store.rgw.buckets.data". - pool: myfs-data0 + poolConfig: + failureDomain: osd + replicated: + size: 1 # RADOS namespace where NFS client recovery data is stored in the pool. namespace: nfs-ns # Settings for the NFS server diff --git a/cluster/examples/kubernetes/ceph/nfs.yaml b/cluster/examples/kubernetes/ceph/nfs.yaml index 86c99a2c53d2..23758eb49c62 100644 --- a/cluster/examples/kubernetes/ceph/nfs.yaml +++ b/cluster/examples/kubernetes/ceph/nfs.yaml @@ -5,11 +5,11 @@ metadata: namespace: rook-ceph # namespace:cluster spec: rados: - # RADOS pool where NFS client recovery data is stored, must be a replica pool. EC pools don't support omap which is required by ganesha. - # In this example the data pool for the "myfs" filesystem is used. Separate pool for storing ganesha recovery data is recommended. - # Due to this dashboard issue https://tracker.ceph.com/issues/46176. - # If using the object store example, the data pool would be "my-store.rgw.buckets.data". - pool: myfs-data0 + # The Ganesha pool spec. Must use replication. + poolConfig: + failureDomain: host + replicated: + size: 3 # RADOS namespace where NFS client recovery data is stored in the pool. 
namespace: nfs-ns # Settings for the NFS server diff --git a/cluster/examples/kubernetes/ceph/object-ec.yaml b/cluster/examples/kubernetes/ceph/object-ec.yaml index cc0448e57008..8dabb8fde163 100644 --- a/cluster/examples/kubernetes/ceph/object-ec.yaml +++ b/cluster/examples/kubernetes/ceph/object-ec.yaml @@ -20,7 +20,7 @@ spec: requireSafeReplicaSize: true parameters: # Inline compression mode for the data pool - # Further reference: https://docs.ceph.com/docs/nautilus/rados/configuration/bluestore-config-ref/#inline-compression + # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression compression_mode: none # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size @@ -33,7 +33,7 @@ spec: codingChunks: 1 parameters: # Inline compression mode for the data pool - # Further reference: https://docs.ceph.com/docs/nautilus/rados/configuration/bluestore-config-ref/#inline-compression + # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression compression_mode: none # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size @@ -87,6 +87,8 @@ spec: bucket: disabled: false interval: 60s - # Configure the pod liveness probe for the rgw daemon + # Configure the pod probes for the rgw daemon livenessProbe: disabled: false + readinessProbe: + disabled: false diff --git a/cluster/examples/kubernetes/ceph/object-openshift.yaml b/cluster/examples/kubernetes/ceph/object-openshift.yaml index c2ce7b9ed5a9..1ad50edc1fa4 100644 --- a/cluster/examples/kubernetes/ceph/object-openshift.yaml +++ b/cluster/examples/kubernetes/ceph/object-openshift.yaml @@ -20,7 +20,7 @@ spec: requireSafeReplicaSize: true parameters: # Inline compression mode for the data pool - # Further reference: https://docs.ceph.com/docs/nautilus/rados/configuration/bluestore-config-ref/#inline-compression + # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression compression_mode: none # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size @@ -35,7 +35,7 @@ spec: requireSafeReplicaSize: true parameters: # Inline compression mode for the data pool - # Further reference: https://docs.ceph.com/docs/nautilus/rados/configuration/bluestore-config-ref/#inline-compression + # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression compression_mode: none # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size @@ -101,9 +101,11 @@ spec: bucket: disabled: false interval: 60s - # Configure the pod liveness probe for the rgw daemon + # Configure the pod probes for the rgw daemon livenessProbe: disabled: false + readinessProbe: + disabled: false # security oriented settings # security: # To enable the KMS configuration properly don't forget to uncomment the Secret at the end of 
the file diff --git a/cluster/examples/kubernetes/ceph/object-user.yaml b/cluster/examples/kubernetes/ceph/object-user.yaml index bf2b6b41fa78..8ae3d24132d0 100644 --- a/cluster/examples/kubernetes/ceph/object-user.yaml +++ b/cluster/examples/kubernetes/ceph/object-user.yaml @@ -11,3 +11,15 @@ metadata: spec: store: my-store displayName: "my display name" + # Quotas set on the user + # quotas: + # maxBuckets: 100 + # maxSize: 10G + # maxObjects: 10000 + # Additional permissions given to the user + # capabilities: + # user: "*" + # bucket: "*" + # metadata: "*" + # usage: "*" + # zone: "*" diff --git a/cluster/examples/kubernetes/ceph/object.yaml b/cluster/examples/kubernetes/ceph/object.yaml index 430c9c60d89e..e0c4d64d7e1a 100644 --- a/cluster/examples/kubernetes/ceph/object.yaml +++ b/cluster/examples/kubernetes/ceph/object.yaml @@ -20,7 +20,7 @@ spec: requireSafeReplicaSize: true parameters: # Inline compression mode for the data pool - # Further reference: https://docs.ceph.com/docs/nautilus/rados/configuration/bluestore-config-ref/#inline-compression + # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression compression_mode: none # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size @@ -35,7 +35,7 @@ spec: requireSafeReplicaSize: true parameters: # Inline compression mode for the data pool - # Further reference: https://docs.ceph.com/docs/nautilus/rados/configuration/bluestore-config-ref/#inline-compression + # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression compression_mode: none # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size @@ -46,6 +46,8 @@ spec: gateway: # A reference to the secret in the rook namespace where the ssl certificate is stored # sslCertificateRef: + # A reference to the secret in the rook namespace where the ca bundle is stored + # caBundleRef: # The port that RGW pods will listen on (http) port: 80 # The port that RGW pods will listen on (https). An ssl certificate is required. @@ -105,9 +107,11 @@ spec: bucket: disabled: false interval: 60s - # Configure the pod liveness probe for the rgw daemon + # Configure the pod probes for the rgw daemon livenessProbe: disabled: false + readinessProbe: + disabled: false # security oriented settings # security: # To enable the KMS configuration properly don't forget to uncomment the Secret at the end of the file diff --git a/cluster/examples/kubernetes/ceph/operator-openshift.yaml b/cluster/examples/kubernetes/ceph/operator-openshift.yaml index 5ce9a35ee39c..ab55a3fab517 100644 --- a/cluster/examples/kubernetes/ceph/operator-openshift.yaml +++ b/cluster/examples/kubernetes/ceph/operator-openshift.yaml @@ -14,9 +14,9 @@ allowPrivilegedContainer: true allowHostNetwork: true allowHostDirVolumePlugin: true priority: -allowedCapabilities: [] allowHostPorts: true allowHostPID: true # remove this once we drop support for Nautilus +allowedCapabilities: ["MKNOD"] allowHostIPC: true readOnlyRootFilesystem: false requiredDropCapabilities: [] @@ -117,6 +117,9 @@ data: # Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity. 
# CSI_LOG_LEVEL: "0" + # Set replicas for csi provisioner deployment. + CSI_PROVISIONER_REPLICAS: "2" + # OMAP generator generates the omap mapping between the PV name and the RBD image # which helps CSI to identify the rbd images for CSI operations. # CSI_ENABLE_OMAP_GENERATOR need to be enabled when we are using rbd mirroring feature. @@ -141,19 +144,19 @@ data: # (Optional) policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted. # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html - CSI_CEPHFS_FSGROUPPOLICY: "ReadWriteOnceWithFSType" + CSI_CEPHFS_FSGROUPPOLICY: "None" # (Optional) Allow starting unsupported ceph-csi image ROOK_CSI_ALLOW_UNSUPPORTED_VERSION: "false" # The default version of CSI supported by Rook will be started. To change the version # of the CSI driver to something other than what is officially supported, change # these images to the desired release of the CSI driver. - # ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.3.1" - # ROOK_CSI_REGISTRAR_IMAGE: "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.2.0" - # ROOK_CSI_RESIZER_IMAGE: "k8s.gcr.io/sig-storage/csi-resizer:v1.2.0" - # ROOK_CSI_PROVISIONER_IMAGE: "k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2" - # ROOK_CSI_SNAPSHOTTER_IMAGE: "k8s.gcr.io/sig-storage/csi-snapshotter:v4.1.1" - # ROOK_CSI_ATTACHER_IMAGE: "k8s.gcr.io/sig-storage/csi-attacher:v3.2.1" + # ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.4.0" + # ROOK_CSI_REGISTRAR_IMAGE: "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0" + # ROOK_CSI_RESIZER_IMAGE: "k8s.gcr.io/sig-storage/csi-resizer:v1.3.0" + # ROOK_CSI_PROVISIONER_IMAGE: "k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0" + # ROOK_CSI_SNAPSHOTTER_IMAGE: "k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0" + # ROOK_CSI_ATTACHER_IMAGE: "k8s.gcr.io/sig-storage/csi-attacher:v3.3.0" # (Optional) set user created priorityclassName for csi plugin pods. # CSI_PLUGIN_PRIORITY_CLASSNAME: "system-node-critical" @@ -400,8 +403,12 @@ data: # Whether to start the discovery daemon to watch for raw storage devices on nodes in the cluster. # This daemon does not need to run if you are only going to create your OSDs based on StorageClassDeviceSets with PVCs. ROOK_ENABLE_DISCOVERY_DAEMON: "false" - # Enable volume replication controller + # Enable the volume replication controller + # Before enabling, ensure the Volume Replication CRDs are created. + # See https://rook.io/docs/rook/latest/ceph-csi-drivers.html#rbd-mirroring CSI_ENABLE_VOLUME_REPLICATION: "false" + # The timeout value (in seconds) of Ceph commands. It should be >= 1. If this variable is not set or is an invalid value, it's default to 15. + ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS: "15" # CSI_VOLUME_REPLICATION_IMAGE: "quay.io/csiaddons/volumereplication-operator:v0.1.0" # (Optional) Admission controller NodeAffinity. @@ -439,7 +446,7 @@ spec: serviceAccountName: rook-ceph-system containers: - name: rook-ceph-operator - image: rook/ceph:master + image: rook/ceph:v1.7.8 args: ["ceph", "operator"] volumeMounts: - mountPath: /var/lib/rook diff --git a/cluster/examples/kubernetes/ceph/operator.yaml b/cluster/examples/kubernetes/ceph/operator.yaml index b531ce87e0ea..a80dfef688d5 100644 --- a/cluster/examples/kubernetes/ceph/operator.yaml +++ b/cluster/examples/kubernetes/ceph/operator.yaml @@ -41,6 +41,9 @@ data: # Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity. # CSI_LOG_LEVEL: "0" + # Set replicas for csi provisioner deployment. 
+ CSI_PROVISIONER_REPLICAS: "2" + # OMAP generator will generate the omap mapping between the PV name and the RBD image. # CSI_ENABLE_OMAP_GENERATOR need to be enabled when we are using rbd mirroring feature. # By default OMAP generator sidecar is deployed with CSI provisioner pod, to disable @@ -65,19 +68,19 @@ data: # (Optional) policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted. # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html - CSI_CEPHFS_FSGROUPPOLICY: "ReadWriteOnceWithFSType" + CSI_CEPHFS_FSGROUPPOLICY: "None" # (Optional) Allow starting unsupported ceph-csi image ROOK_CSI_ALLOW_UNSUPPORTED_VERSION: "false" # The default version of CSI supported by Rook will be started. To change the version # of the CSI driver to something other than what is officially supported, change # these images to the desired release of the CSI driver. - # ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.3.1" - # ROOK_CSI_REGISTRAR_IMAGE: "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.2.0" - # ROOK_CSI_RESIZER_IMAGE: "k8s.gcr.io/sig-storage/csi-resizer:v1.2.0" - # ROOK_CSI_PROVISIONER_IMAGE: "k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2" - # ROOK_CSI_SNAPSHOTTER_IMAGE: "k8s.gcr.io/sig-storage/csi-snapshotter:v4.1.1" - # ROOK_CSI_ATTACHER_IMAGE: "k8s.gcr.io/sig-storage/csi-attacher:v3.2.1" + # ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.4.0" + # ROOK_CSI_REGISTRAR_IMAGE: "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0" + # ROOK_CSI_RESIZER_IMAGE: "k8s.gcr.io/sig-storage/csi-resizer:v1.3.0" + # ROOK_CSI_PROVISIONER_IMAGE: "k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0" + # ROOK_CSI_SNAPSHOTTER_IMAGE: "k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0" + # ROOK_CSI_ATTACHER_IMAGE: "k8s.gcr.io/sig-storage/csi-attacher:v3.3.0" # (Optional) set user created priorityclassName for csi plugin pods. # CSI_PLUGIN_PRIORITY_CLASSNAME: "system-node-critical" @@ -324,7 +327,11 @@ data: # Whether to start the discovery daemon to watch for raw storage devices on nodes in the cluster. # This daemon does not need to run if you are only going to create your OSDs based on StorageClassDeviceSets with PVCs. ROOK_ENABLE_DISCOVERY_DAEMON: "false" - # Enable volume replication controller + # The timeout value (in seconds) of Ceph commands. It should be >= 1. If this variable is not set or is an invalid value, it's default to 15. + ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS: "15" + # Enable the volume replication controller. + # Before enabling, ensure the Volume Replication CRDs are created. + # See https://rook.io/docs/rook/latest/ceph-csi-drivers.html#rbd-mirroring CSI_ENABLE_VOLUME_REPLICATION: "false" # CSI_VOLUME_REPLICATION_IMAGE: "quay.io/csiaddons/volumereplication-operator:v0.1.0" @@ -362,7 +369,7 @@ spec: serviceAccountName: rook-ceph-system containers: - name: rook-ceph-operator - image: rook/ceph:master + image: rook/ceph:v1.7.8 args: ["ceph", "operator"] volumeMounts: - mountPath: /var/lib/rook diff --git a/cluster/examples/kubernetes/ceph/osd-purge.yaml b/cluster/examples/kubernetes/ceph/osd-purge.yaml index a472ce71a552..732aaa235bca 100644 --- a/cluster/examples/kubernetes/ceph/osd-purge.yaml +++ b/cluster/examples/kubernetes/ceph/osd-purge.yaml @@ -25,10 +25,11 @@ spec: serviceAccountName: rook-ceph-purge-osd containers: - name: osd-removal - image: rook/ceph:master + image: rook/ceph:v1.7.8 # TODO: Insert the OSD ID in the last parameter that is to be removed # The OSD IDs are a comma-separated list. 
For example: "0" or "0,2". - args: ["ceph", "osd", "remove", "--osd-ids", ""] + # If you want to preserve the OSD PVCs, set `--preserve-pvc true`. + args: ["ceph", "osd", "remove", "--preserve-pvc", "false", "--osd-ids", ""] env: - name: POD_NAMESPACE valueFrom: diff --git a/cluster/examples/kubernetes/ceph/pool-mirrored.yaml b/cluster/examples/kubernetes/ceph/pool-mirrored.yaml new file mode 100644 index 000000000000..7fe22d1980e5 --- /dev/null +++ b/cluster/examples/kubernetes/ceph/pool-mirrored.yaml @@ -0,0 +1,16 @@ +################################################################################################################# +# Create a mirroring enabled Ceph pool. +# kubectl create -f pool-mirrored.yaml +################################################################################################################# + +apiVersion: ceph.rook.io/v1 +kind: CephBlockPool +metadata: + name: mirrored-pool + namespace: rook-ceph +spec: + replicated: + size: 3 + mirroring: + enabled: true + mode: image diff --git a/cluster/examples/kubernetes/ceph/pool.yaml b/cluster/examples/kubernetes/ceph/pool.yaml index fae98396071a..da3c7ebd395d 100644 --- a/cluster/examples/kubernetes/ceph/pool.yaml +++ b/cluster/examples/kubernetes/ceph/pool.yaml @@ -26,10 +26,10 @@ spec: # The name of the failure domain to place further down replicas # subFailureDomain: host # Ceph CRUSH root location of the rule - # For reference: https://docs.ceph.com/docs/nautilus/rados/operations/crush-map/#types-and-buckets + # For reference: https://docs.ceph.com/docs/master/rados/operations/crush-map/#types-and-buckets #crushRoot: my-root # The Ceph CRUSH device class associated with the CRUSH replicated rule - # For reference: https://docs.ceph.com/docs/nautilus/rados/operations/crush-map/#device-classes + # For reference: https://docs.ceph.com/docs/master/rados/operations/crush-map/#device-classes #deviceClass: my-class # Enables collecting RBD per-image IO statistics by enabling dynamic OSD performance counters. Defaults to false. 
# For reference: https://docs.ceph.com/docs/master/mgr/prometheus/#rbd-io-statistics @@ -38,7 +38,7 @@ spec: # see https://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values parameters: # Inline compression mode for the data pool - # Further reference: https://docs.ceph.com/docs/nautilus/rados/configuration/bluestore-config-ref/#inline-compression + # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression compression_mode: none # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size diff --git a/cluster/examples/kubernetes/ceph/pre-k8s-1.16/crds.yaml b/cluster/examples/kubernetes/ceph/pre-k8s-1.16/crds.yaml index 4554f44bb9ab..80a58ffe252d 100644 --- a/cluster/examples/kubernetes/ceph/pre-k8s-1.16/crds.yaml +++ b/cluster/examples/kubernetes/ceph/pre-k8s-1.16/crds.yaml @@ -684,6 +684,10 @@ spec: enum: - image - pool + peers: + properties: + secretNames: + type: array snapshotSchedules: type: object properties: @@ -773,4 +777,4 @@ spec: scope: Namespaced version: v1 subresources: - status: {} \ No newline at end of file + status: {} diff --git a/cluster/examples/kubernetes/ceph/toolbox-job.yaml b/cluster/examples/kubernetes/ceph/toolbox-job.yaml index 3fee97919082..8b9efb297bfc 100644 --- a/cluster/examples/kubernetes/ceph/toolbox-job.yaml +++ b/cluster/examples/kubernetes/ceph/toolbox-job.yaml @@ -10,7 +10,7 @@ spec: spec: initContainers: - name: config-init - image: rook/ceph:master + image: rook/ceph:v1.7.8 command: ["/usr/local/bin/toolbox.sh"] args: ["--skip-watch"] imagePullPolicy: IfNotPresent @@ -32,7 +32,7 @@ spec: mountPath: /etc/rook containers: - name: script - image: rook/ceph:master + image: rook/ceph:v1.7.8 volumeMounts: - mountPath: /etc/ceph name: ceph-config diff --git a/cluster/examples/kubernetes/ceph/toolbox.yaml b/cluster/examples/kubernetes/ceph/toolbox.yaml index 28edc958b4bf..463504766a7d 100644 --- a/cluster/examples/kubernetes/ceph/toolbox.yaml +++ b/cluster/examples/kubernetes/ceph/toolbox.yaml @@ -18,7 +18,7 @@ spec: dnsPolicy: ClusterFirstWithHostNet containers: - name: rook-ceph-tools - image: rook/ceph:master + image: rook/ceph:v1.7.8 command: ["/tini"] args: ["-g", "--", "/usr/local/bin/toolbox.sh"] imagePullPolicy: IfNotPresent diff --git a/cluster/examples/kubernetes/ceph/volume-replication-class.yaml b/cluster/examples/kubernetes/ceph/volume-replication-class.yaml new file mode 100644 index 000000000000..5700285cf2ea --- /dev/null +++ b/cluster/examples/kubernetes/ceph/volume-replication-class.yaml @@ -0,0 +1,12 @@ +apiVersion: replication.storage.openshift.io/v1alpha1 +kind: VolumeReplicationClass +metadata: + name: rbd-volumereplicationclass +spec: + provisioner: rook-ceph.rbd.csi.ceph.com + parameters: + mirroringMode: snapshot + schedulingInterval: "12m" + schedulingStartTime: "16:18:43" + replication.storage.openshift.io/replication-secret-name: rook-csi-rbd-provisioner + replication.storage.openshift.io/replication-secret-namespace: rook-ceph diff --git a/cluster/examples/kubernetes/ceph/volume-replication.yaml b/cluster/examples/kubernetes/ceph/volume-replication.yaml new file mode 100644 index 000000000000..8b26e369d53a --- /dev/null +++ b/cluster/examples/kubernetes/ceph/volume-replication.yaml @@ -0,0 +1,11 @@ +apiVersion: replication.storage.openshift.io/v1alpha1 +kind: VolumeReplication +metadata: + name: 
pvc-volumereplication +spec: + volumeReplicationClass: rbd-volumereplicationclass + replicationState: primary + dataSource: + apiGroup: "" + kind: PersistentVolumeClaim + name: rbd-pvc # Name of the PVC on which mirroring is to be enabled. diff --git a/cluster/examples/kubernetes/nfs/busybox-rc.yaml b/cluster/examples/kubernetes/nfs/busybox-rc.yaml deleted file mode 100644 index 4b5c8fc24ac9..000000000000 --- a/cluster/examples/kubernetes/nfs/busybox-rc.yaml +++ /dev/null @@ -1,35 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: nfs-demo - role: busybox - name: nfs-busybox -spec: - replicas: 2 - selector: - matchLabels: - app: nfs-demo - role: busybox - template: - metadata: - labels: - app: nfs-demo - role: busybox - spec: - containers: - - image: busybox - command: - - sh - - -c - - "while true; do date > /mnt/index.html; hostname >> /mnt/index.html; sleep $(($RANDOM % 5 + 5)); done" - imagePullPolicy: IfNotPresent - name: busybox - volumeMounts: - # name must match the volume name below - - name: rook-nfs-vol - mountPath: "/mnt" - volumes: - - name: rook-nfs-vol - persistentVolumeClaim: - claimName: rook-nfs-pv-claim diff --git a/cluster/examples/kubernetes/nfs/crds.yaml b/cluster/examples/kubernetes/nfs/crds.yaml deleted file mode 100644 index f47ffe1972a3..000000000000 --- a/cluster/examples/kubernetes/nfs/crds.yaml +++ /dev/null @@ -1,141 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c - creationTimestamp: null - name: nfsservers.nfs.rook.io -spec: - group: nfs.rook.io - names: - kind: NFSServer - listKind: NFSServerList - plural: nfsservers - singular: nfsserver - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - - description: NFS Server instance state - jsonPath: .status.state - name: State - type: string - name: v1alpha1 - schema: - openAPIV3Schema: - description: NFSServer is the Schema for the nfsservers API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: NFSServerSpec represents the spec of NFS daemon - properties: - annotations: - additionalProperties: - type: string - description: The annotations-related configuration to add/set on each Pod related object. - type: object - exports: - description: The parameters to configure the NFS export - items: - description: ExportsSpec represents the spec of NFS exports - properties: - name: - description: Name of the export - type: string - persistentVolumeClaim: - description: PVC from which the NFS daemon gets storage for sharing - properties: - claimName: - description: 'ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. 
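The volume-replication-class.yaml and volume-replication.yaml examples above mark an RBD-backed PVC as the primary side of a snapshot-based mirroring relationship. A usage sketch, assuming the volume replication controller is enabled (CSI_ENABLE_VOLUME_REPLICATION above), its CRDs are installed, and a PVC named rbd-pvc exists in the same namespace:

kubectl create -f cluster/examples/kubernetes/ceph/volume-replication-class.yaml
kubectl create -f cluster/examples/kubernetes/ceph/volume-replication.yaml
# The status section should eventually report the resource as Primary:
kubectl get volumereplication pvc-volumereplication -o yaml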
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - type: string - readOnly: - description: Will force the ReadOnly setting in VolumeMounts. Default false. - type: boolean - required: - - claimName - type: object - server: - description: The NFS server configuration - properties: - accessMode: - description: Reading and Writing permissions on the export Valid values are "ReadOnly", "ReadWrite" and "none" - enum: - - ReadOnly - - ReadWrite - - none - type: string - allowedClients: - description: The clients allowed to access the NFS export - items: - description: AllowedClientsSpec represents the client specs for accessing the NFS export - properties: - accessMode: - description: Reading and Writing permissions for the client to access the NFS export Valid values are "ReadOnly", "ReadWrite" and "none" Gets overridden when ServerSpec.accessMode is specified - enum: - - ReadOnly - - ReadWrite - - none - type: string - clients: - description: The clients that can access the share Values can be hostname, ip address, netgroup, CIDR network address, or all - items: - type: string - type: array - name: - description: Name of the clients group - type: string - squash: - description: Squash options for clients Valid values are "none", "rootid", "root", and "all" Gets overridden when ServerSpec.squash is specified - enum: - - none - - rootid - - root - - all - type: string - type: object - type: array - squash: - description: This prevents the root users connected remotely from having root privileges Valid values are "none", "rootid", "root", and "all" - enum: - - none - - rootid - - root - - all - type: string - type: object - type: object - type: array - replicas: - description: Replicas of the NFS daemon - type: integer - type: object - status: - description: NFSServerStatus defines the observed state of NFSServer - properties: - message: - type: string - reason: - type: string - state: - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/cluster/examples/kubernetes/nfs/nfs-ceph.yaml b/cluster/examples/kubernetes/nfs/nfs-ceph.yaml deleted file mode 100644 index fbdc51dabbf6..000000000000 --- a/cluster/examples/kubernetes/nfs/nfs-ceph.yaml +++ /dev/null @@ -1,37 +0,0 @@ ---- -# A rook ceph cluster must be running -# Create a rook ceph cluster using examples in rook/cluster/examples/kubernetes/ceph -# Refer to https://rook.io/docs/rook/master/ceph-quickstart.html for a quick rook cluster setup -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: nfs-ceph-claim - namespace: rook-nfs -spec: - storageClassName: rook-ceph-block - accessModes: - - ReadWriteMany - resources: - requests: - storage: 2Gi ---- -apiVersion: nfs.rook.io/v1alpha1 -kind: NFSServer -metadata: - name: rook-nfs - namespace: rook-nfs -spec: - replicas: 1 - exports: - - name: share1 - server: - accessMode: ReadWrite - squash: "none" - # A Persistent Volume Claim must be created before creating NFS CRD instance. 
- # Create a Ceph cluster for using this example - # Create a ceph PVC after creating the rook ceph cluster using ceph-pvc.yaml - persistentVolumeClaim: - claimName: nfs-ceph-claim - # A key/value list of annotations - annotations: - rook: nfs diff --git a/cluster/examples/kubernetes/nfs/nfs-xfs.yaml b/cluster/examples/kubernetes/nfs/nfs-xfs.yaml deleted file mode 100644 index 2a85ff0324fc..000000000000 --- a/cluster/examples/kubernetes/nfs/nfs-xfs.yaml +++ /dev/null @@ -1,50 +0,0 @@ ---- -# A storage class with name standard-xfs must be present. -# The storage class must be has xfs filesystem type and prjquota mountOptions. -# This is example storage class for google compute engine pd -# --- -# apiVersion: storage.k8s.io/v1 -# kind: StorageClass -# metadata: -# name: standard-xfs -# parameters: -# type: pd-standard -# fsType: xfs -# mountOptions: -# - prjquota -# provisioner: kubernetes.io/gce-pd -# reclaimPolicy: Delete -# volumeBindingMode: Immediate -# allowVolumeExpansion: true -# -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: nfs-xfs-claim - namespace: rook-nfs -spec: - storageClassName: "standard-xfs" - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi ---- -apiVersion: nfs.rook.io/v1alpha1 -kind: NFSServer -metadata: - name: rook-nfs - namespace: rook-nfs -spec: - replicas: 1 - exports: - - name: share1 - server: - accessMode: ReadWrite - squash: "none" - # A Persistent Volume Claim must be created before creating NFS CRD instance. - persistentVolumeClaim: - claimName: nfs-xfs-claim - # A key/value list of annotations - annotations: - rook: nfs diff --git a/cluster/examples/kubernetes/nfs/nfs.yaml b/cluster/examples/kubernetes/nfs/nfs.yaml deleted file mode 100644 index 742fcf9de8af..000000000000 --- a/cluster/examples/kubernetes/nfs/nfs.yaml +++ /dev/null @@ -1,32 +0,0 @@ ---- -# A default storageclass must be present -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: nfs-default-claim - namespace: rook-nfs -spec: - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi ---- -apiVersion: nfs.rook.io/v1alpha1 -kind: NFSServer -metadata: - name: rook-nfs - namespace: rook-nfs -spec: - replicas: 1 - exports: - - name: share1 - server: - accessMode: ReadWrite - squash: "none" - # A Persistent Volume Claim must be created before creating NFS CRD instance. 
- persistentVolumeClaim: - claimName: nfs-default-claim - # A key/value list of annotations - annotations: - rook: nfs diff --git a/cluster/examples/kubernetes/nfs/operator.yaml b/cluster/examples/kubernetes/nfs/operator.yaml deleted file mode 100644 index b28990977d94..000000000000 --- a/cluster/examples/kubernetes/nfs/operator.yaml +++ /dev/null @@ -1,136 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: rook-nfs-system # namespace:operator ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-nfs-operator - namespace: rook-nfs-system # namespace:operator ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-nfs-operator -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-nfs-operator -subjects: - - kind: ServiceAccount - name: rook-nfs-operator - namespace: rook-nfs-system # namespace:operator ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: rook-nfs-operator -rules: - - apiGroups: - - "" - resources: - - configmaps - verbs: - - create - - get - - list - - patch - - update - - watch - - apiGroups: - - "" - resources: - - events - verbs: - - create - - get - - list - - patch - - update - - watch - - apiGroups: - - "" - resources: - - pods - verbs: - - list - - get - - watch - - create - - apiGroups: - - "" - resources: - - services - verbs: - - create - - get - - list - - patch - - update - - watch - - apiGroups: - - apps - resources: - - statefulsets - verbs: - - create - - get - - list - - patch - - update - - watch - - apiGroups: - - nfs.rook.io - resources: - - nfsservers - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - nfs.rook.io - resources: - - nfsservers/status - - nfsservers/finalizers - verbs: - - get - - patch - - update ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: rook-nfs-operator - namespace: rook-nfs-system # namespace:operator - labels: - app: rook-nfs-operator -spec: - replicas: 1 - selector: - matchLabels: - app: rook-nfs-operator - template: - metadata: - labels: - app: rook-nfs-operator - spec: - serviceAccountName: rook-nfs-operator - containers: - - name: rook-nfs-operator - image: rook/nfs:master - imagePullPolicy: IfNotPresent - args: ["nfs", "operator"] - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace diff --git a/cluster/examples/kubernetes/nfs/psp.yaml b/cluster/examples/kubernetes/nfs/psp.yaml deleted file mode 100644 index c6105111080d..000000000000 --- a/cluster/examples/kubernetes/nfs/psp.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: rook-nfs-policy -spec: - privileged: true - fsGroup: - rule: RunAsAny - allowedCapabilities: - - DAC_READ_SEARCH - - SYS_RESOURCE - runAsUser: - rule: RunAsAny - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - configMap - - downwardAPI - - emptyDir - - persistentVolumeClaim - - secret - - hostPath diff --git a/cluster/examples/kubernetes/nfs/pvc.yaml b/cluster/examples/kubernetes/nfs/pvc.yaml deleted file mode 100644 index 789de6b86e05..000000000000 --- a/cluster/examples/kubernetes/nfs/pvc.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: rook-nfs-pv-claim -spec: - storageClassName: "rook-nfs-share1" - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Mi diff 
--git a/cluster/examples/kubernetes/nfs/rbac.yaml b/cluster/examples/kubernetes/nfs/rbac.yaml deleted file mode 100644 index 3f1224d0fbe0..000000000000 --- a/cluster/examples/kubernetes/nfs/rbac.yaml +++ /dev/null @@ -1,60 +0,0 @@ ---- -apiVersion: v1 -kind: Namespace -metadata: - name: rook-nfs ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-nfs-server - namespace: rook-nfs ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-nfs-provisioner-runner -rules: - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "create", "delete"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["create", "update", "patch"] - - apiGroups: [""] - resources: ["services", "endpoints"] - verbs: ["get"] - - apiGroups: ["policy"] - resources: ["podsecuritypolicies"] - resourceNames: ["rook-nfs-policy"] - verbs: ["use"] - - apiGroups: [""] - resources: ["endpoints"] - verbs: ["get", "list", "watch", "create", "update", "patch"] - - apiGroups: - - nfs.rook.io - resources: - - "*" - verbs: - - "*" ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-nfs-provisioner-runner -subjects: - - kind: ServiceAccount - name: - rook-nfs-server - # replace with namespace where provisioner is deployed - namespace: rook-nfs -roleRef: - kind: ClusterRole - name: rook-nfs-provisioner-runner - apiGroup: rbac.authorization.k8s.io diff --git a/cluster/examples/kubernetes/nfs/sc.yaml b/cluster/examples/kubernetes/nfs/sc.yaml deleted file mode 100644 index 2ad62ed6bec1..000000000000 --- a/cluster/examples/kubernetes/nfs/sc.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - labels: - app: rook-nfs - name: rook-nfs-share1 -parameters: - exportName: share1 - nfsServerName: rook-nfs - nfsServerNamespace: rook-nfs -provisioner: nfs.rook.io/rook-nfs-provisioner -reclaimPolicy: Delete -volumeBindingMode: Immediate diff --git a/cluster/examples/kubernetes/nfs/scc.yaml b/cluster/examples/kubernetes/nfs/scc.yaml deleted file mode 100644 index 4c939ddcd4d5..000000000000 --- a/cluster/examples/kubernetes/nfs/scc.yaml +++ /dev/null @@ -1,36 +0,0 @@ -kind: SecurityContextConstraints -apiVersion: security.openshift.io/v1 -metadata: - name: rook-nfs -allowHostDirVolumePlugin: true -allowHostIPC: false -allowHostNetwork: false -allowHostPID: false -allowHostPorts: false -allowPrivilegedContainer: false -allowedCapabilities: - - SYS_ADMIN - - DAC_READ_SEARCH -defaultAddCapabilities: null -fsGroup: - type: MustRunAs -priority: null -readOnlyRootFilesystem: false -requiredDropCapabilities: - - KILL - - MKNOD - - SYS_CHROOT -runAsUser: - type: RunAsAny -seLinuxContext: - type: MustRunAs -supplementalGroups: - type: RunAsAny -volumes: - - configMap - - downwardAPI - - emptyDir - - persistentVolumeClaim - - secret -users: - - system:serviceaccount:rook-nfs:rook-nfs-server diff --git a/cluster/examples/kubernetes/nfs/web-rc.yaml b/cluster/examples/kubernetes/nfs/web-rc.yaml deleted file mode 100644 index 92987c8c1b26..000000000000 --- a/cluster/examples/kubernetes/nfs/web-rc.yaml +++ /dev/null @@ -1,33 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: nfs-demo - role: web-frontend - name: nfs-web -spec: - replicas: 2 - selector: - matchLabels: - app: 
nfs-demo - role: web-frontend - template: - metadata: - labels: - app: nfs-demo - role: web-frontend - spec: - containers: - - name: web - image: nginx - ports: - - name: web - containerPort: 80 - volumeMounts: - # name must match the volume name below - - name: rook-nfs-vol - mountPath: "/usr/share/nginx/html" - volumes: - - name: rook-nfs-vol - persistentVolumeClaim: - claimName: rook-nfs-pv-claim diff --git a/cluster/examples/kubernetes/nfs/web-service.yaml b/cluster/examples/kubernetes/nfs/web-service.yaml deleted file mode 100644 index b73cac2bc944..000000000000 --- a/cluster/examples/kubernetes/nfs/web-service.yaml +++ /dev/null @@ -1,9 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: - name: nfs-web -spec: - ports: - - port: 80 - selector: - role: web-frontend diff --git a/cluster/examples/kubernetes/nfs/webhook.yaml b/cluster/examples/kubernetes/nfs/webhook.yaml deleted file mode 100644 index af0a918c8836..000000000000 --- a/cluster/examples/kubernetes/nfs/webhook.yaml +++ /dev/null @@ -1,128 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-nfs-webhook - namespace: rook-nfs-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: rook-nfs-webhook - namespace: rook-nfs-system -rules: - - apiGroups: [""] - resources: ["secrets"] - resourceNames: - - "rook-nfs-webhook-cert" - verbs: ["get", "list", "watch"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: rook-nfs-webhook - namespace: rook-nfs-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-nfs-webhook -subjects: - - apiGroup: "" - kind: ServiceAccount - name: rook-nfs-webhook - namespace: rook-nfs-system ---- -apiVersion: cert-manager.io/v1alpha2 -kind: Certificate -metadata: - name: rook-nfs-webhook-cert - namespace: rook-nfs-system -spec: - dnsNames: - - rook-nfs-webhook.rook-nfs-system.svc - - rook-nfs-webhook.rook-nfs-system.svc.cluster.local - issuerRef: - kind: Issuer - name: rook-nfs-selfsigned-issuer - secretName: rook-nfs-webhook-cert ---- -apiVersion: cert-manager.io/v1alpha2 -kind: Issuer -metadata: - name: rook-nfs-selfsigned-issuer - namespace: rook-nfs-system -spec: - selfSigned: {} ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: ValidatingWebhookConfiguration -metadata: - annotations: - cert-manager.io/inject-ca-from: rook-nfs-system/rook-nfs-webhook-cert - creationTimestamp: null - name: rook-nfs-validating-webhook-configuration -webhooks: - - clientConfig: - caBundle: Cg== - service: - name: rook-nfs-webhook - namespace: rook-nfs-system - path: /validate-nfs-rook-io-v1alpha1-nfsserver - failurePolicy: Fail - name: validation.nfsserver.nfs.rook.io - rules: - - apiGroups: - - nfs.rook.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - nfsservers ---- -kind: Service -apiVersion: v1 -metadata: - name: rook-nfs-webhook - namespace: rook-nfs-system -spec: - selector: - app: rook-nfs-webhook - ports: - - port: 443 - targetPort: webhook-server ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: rook-nfs-webhook - namespace: rook-nfs-system - labels: - app: rook-nfs-webhook -spec: - replicas: 1 - selector: - matchLabels: - app: rook-nfs-webhook - template: - metadata: - labels: - app: rook-nfs-webhook - spec: - containers: - - name: rook-nfs-webhook - image: rook/nfs:master - imagePullPolicy: IfNotPresent - args: ["nfs", "webhook"] - ports: - - containerPort: 9443 - name: webhook-server - volumeMounts: - - mountPath: 
/tmp/k8s-webhook-server/serving-certs - name: cert - readOnly: true - volumes: - - name: cert - secret: - defaultMode: 420 - secretName: rook-nfs-webhook-cert diff --git a/cluster/olm/ceph/assemble/metadata-common.yaml b/cluster/olm/ceph/assemble/metadata-common.yaml index 48b4734e7a4a..34e7c405d0f6 100644 --- a/cluster/olm/ceph/assemble/metadata-common.yaml +++ b/cluster/olm/ceph/assemble/metadata-common.yaml @@ -230,7 +230,7 @@ metadata: }, "spec": { "cephVersion": { - "image": "quay.io/ceph/ceph:v16.2.5" + "image": "quay.io/ceph/ceph:v16.2.6" }, "dataDirHostPath": "/var/lib/rook", "mon": { diff --git a/cluster/olm/ceph/generate-rook-csv-templates.sh b/cluster/olm/ceph/generate-rook-csv-templates.sh index 59a1f3d0c1c8..d6043cb2e4ec 100755 --- a/cluster/olm/ceph/generate-rook-csv-templates.sh +++ b/cluster/olm/ceph/generate-rook-csv-templates.sh @@ -12,13 +12,13 @@ if [ -f "Dockerfile" ]; then cd ../../ fi -OLM_CATALOG_DIR=cluster/olm/ceph +: "${OLM_CATALOG_DIR:=cluster/olm/ceph}" DEPLOY_DIR="$OLM_CATALOG_DIR/deploy" CRDS_DIR="$DEPLOY_DIR/crds" TEMPLATES_DIR="$OLM_CATALOG_DIR/templates" -SED=${SED_CMD:-"sed -i'' -e"} +: "${SED_IN_PLACE:="build/sed-in-place"}" function generate_template() { local provider=$1 @@ -32,7 +32,7 @@ function generate_template() { mv $tmp_csv_gen_file $csv_template_file # replace the placeholder with the templated value - $SED "s/9999.9999.9999/{{.RookOperatorCsvVersion}}/g" $csv_template_file + $SED_IN_PLACE "s/9999.9999.9999/{{.RookOperatorCsvVersion}}/g" $csv_template_file echo "Template stored at $csv_template_file" } diff --git a/cluster/olm/ceph/generate-rook-csv.sh b/cluster/olm/ceph/generate-rook-csv.sh index c5b518a4fddb..263d4399ce3d 100755 --- a/cluster/olm/ceph/generate-rook-csv.sh +++ b/cluster/olm/ceph/generate-rook-csv.sh @@ -4,7 +4,7 @@ set -e ################## # INIT VARIABLES # ################## -OLM_CATALOG_DIR=cluster/olm/ceph +: "${OLM_CATALOG_DIR:=cluster/olm/ceph}" ASSEMBLE_FILE_COMMON="$OLM_CATALOG_DIR/assemble/metadata-common.yaml" ASSEMBLE_FILE_K8S="$OLM_CATALOG_DIR/assemble/metadata-k8s.yaml" ASSEMBLE_FILE_OCP="$OLM_CATALOG_DIR/assemble/metadata-ocp.yaml" @@ -76,8 +76,7 @@ ROOK_OP_VERSION=$3 ############# # VARIABLES # ############# -SED_I=(sed -i'' -e) -[ -n "$SED_CMD" ] || read -ra SED_CMD <<< "${SED_I[@]}" +: "${SED_IN_PLACE:="build/sed-in-place"}" YQ_CMD_DELETE=($yq delete -i) YQ_CMD_MERGE_OVERWRITE=($yq merge --inplace --overwrite --prettyPrint) YQ_CMD_MERGE=($yq merge --inplace --append -P ) @@ -226,26 +225,26 @@ function hack_csv() { # rook-ceph-osd --> serviceAccountName # rook-ceph-osd --> rule - "${SED_I[@]}" 's/rook-ceph-global/rook-ceph-system/' "$CSV_FILE_NAME" - "${SED_I[@]}" 's/rook-ceph-object-bucket/rook-ceph-system/' "$CSV_FILE_NAME" - "${SED_I[@]}" 's/rook-ceph-cluster-mgmt/rook-ceph-system/' "$CSV_FILE_NAME" + $SED_IN_PLACE 's/rook-ceph-global/rook-ceph-system/' "$CSV_FILE_NAME" + $SED_IN_PLACE 's/rook-ceph-object-bucket/rook-ceph-system/' "$CSV_FILE_NAME" + $SED_IN_PLACE 's/rook-ceph-cluster-mgmt/rook-ceph-system/' "$CSV_FILE_NAME" - "${SED_I[@]}" 's/rook-ceph-mgr-cluster/rook-ceph-mgr/' "$CSV_FILE_NAME" - "${SED_I[@]}" 's/rook-ceph-mgr-system/rook-ceph-mgr/' "$CSV_FILE_NAME" + $SED_IN_PLACE 's/rook-ceph-mgr-cluster/rook-ceph-mgr/' "$CSV_FILE_NAME" + $SED_IN_PLACE 's/rook-ceph-mgr-system/rook-ceph-mgr/' "$CSV_FILE_NAME" - "${SED_I[@]}" 's/cephfs-csi-nodeplugin/rook-csi-cephfs-plugin-sa/' "$CSV_FILE_NAME" - "${SED_I[@]}" 's/cephfs-external-provisioner-runner/rook-csi-cephfs-provisioner-sa/' "$CSV_FILE_NAME" + 
$SED_IN_PLACE 's/cephfs-csi-nodeplugin/rook-csi-cephfs-plugin-sa/' "$CSV_FILE_NAME" + $SED_IN_PLACE 's/cephfs-external-provisioner-runner/rook-csi-cephfs-provisioner-sa/' "$CSV_FILE_NAME" - "${SED_I[@]}" 's/rbd-csi-nodeplugin/rook-csi-rbd-plugin-sa/' "$CSV_FILE_NAME" - "${SED_I[@]}" 's/rbd-external-provisioner-runner/rook-csi-rbd-provisioner-sa/' "$CSV_FILE_NAME" + $SED_IN_PLACE 's/rbd-csi-nodeplugin/rook-csi-rbd-plugin-sa/' "$CSV_FILE_NAME" + $SED_IN_PLACE 's/rbd-external-provisioner-runner/rook-csi-rbd-provisioner-sa/' "$CSV_FILE_NAME" # The operator-sdk also does not properly respect when # Roles differ from the Service Account name # The operator-sdk instead assumes the Role/ClusterRole is the ServiceAccount name # # To account for these mappings, we have to replace Role/ClusterRole names with # the corresponding ServiceAccount. - "${SED_I[@]}" 's/cephfs-external-provisioner-cfg/rook-csi-cephfs-provisioner-sa/' "$CSV_FILE_NAME" - "${SED_I[@]}" 's/rbd-external-provisioner-cfg/rook-csi-rbd-provisioner-sa/' "$CSV_FILE_NAME" + $SED_IN_PLACE 's/cephfs-external-provisioner-cfg/rook-csi-cephfs-provisioner-sa/' "$CSV_FILE_NAME" + $SED_IN_PLACE 's/rbd-external-provisioner-cfg/rook-csi-rbd-provisioner-sa/' "$CSV_FILE_NAME" } function generate_package() { diff --git a/cmd/rook/cassandra/cassandra.go b/cmd/rook/cassandra/cassandra.go deleted file mode 100644 index 5ce24ac0adf1..000000000000 --- a/cmd/rook/cassandra/cassandra.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cassandra - -import ( - "github.com/coreos/pkg/capnslog" - "github.com/spf13/cobra" -) - -// Cmd exports cobra command according to the cobra documentation. -var Cmd = &cobra.Command{ - Use: "cassandra", - Short: "Main command for cassandra controller pod.", -} - -var ( - logger = capnslog.NewPackageLogger("github.com/rook/rook", "cassandracmd") -) - -func init() { - Cmd.AddCommand(operatorCmd, sidecarCmd) -} diff --git a/cmd/rook/cassandra/operator.go b/cmd/rook/cassandra/operator.go deleted file mode 100644 index f890ed5f2a34..000000000000 --- a/cmd/rook/cassandra/operator.go +++ /dev/null @@ -1,91 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package cassandra - -import ( - "fmt" - "time" - - "github.com/rook/rook/cmd/rook/rook" - rookinformers "github.com/rook/rook/pkg/client/informers/externalversions" - "github.com/rook/rook/pkg/operator/cassandra/constants" - "github.com/rook/rook/pkg/operator/cassandra/controller" - "github.com/rook/rook/pkg/util/flags" - "github.com/spf13/cobra" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apiserver/pkg/server" - kubeinformers "k8s.io/client-go/informers" -) - -const resyncPeriod = time.Second * 30 - -var operatorCmd = &cobra.Command{ - Use: "operator", - Short: "Runs the cassandra operator to deploy and manage cassandra in Kubernetes", - Long: `Runs the cassandra operator to deploy and manage cassandra in kubernetes clusters. -https://github.com/rook/rook`, -} - -func init() { - flags.SetFlagsFromEnv(operatorCmd.Flags(), rook.RookEnvVarPrefix) - - operatorCmd.RunE = startOperator -} - -func startOperator(cmd *cobra.Command, args []string) error { - rook.SetLogLevel() - rook.LogStartupInfo(operatorCmd.Flags()) - - logger.Infof("starting cassandra operator") - context := rook.NewContext() - kubeClient := context.Clientset - rookClient := context.RookClientset - rookImage := rook.GetOperatorImage(kubeClient, "") - - // Only watch kubernetes resources relevant to our app - tweakListOptionsFunc := func(options *metav1.ListOptions) { - - options.LabelSelector = fmt.Sprintf("%s=%s", "app", constants.AppName) - } - - kubeInformerFactory := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod, kubeinformers.WithTweakListOptions(tweakListOptionsFunc)) - rookInformerFactory := rookinformers.NewSharedInformerFactory(rookClient, resyncPeriod) - - c := controller.New( - rookImage, - kubeClient, - rookClient, - rookInformerFactory.Cassandra().V1alpha1().Clusters(), - kubeInformerFactory.Apps().V1().StatefulSets(), - kubeInformerFactory.Core().V1().Services(), - kubeInformerFactory.Core().V1().Pods(), - ) - - // Create a channel to receive OS signals - stopCh := server.SetupSignalHandler() - - // Start the informer factories - go kubeInformerFactory.Start(stopCh) - go rookInformerFactory.Start(stopCh) - - // Start the controller - if err := c.Run(1, stopCh); err != nil { - logger.Fatalf("Error running controller: %s", err.Error()) - } - - return nil -} diff --git a/cmd/rook/cassandra/sidecar.go b/cmd/rook/cassandra/sidecar.go deleted file mode 100644 index 742e694c2c4f..000000000000 --- a/cmd/rook/cassandra/sidecar.go +++ /dev/null @@ -1,105 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package cassandra - -import ( - "fmt" - "os" - - "github.com/rook/rook/cmd/rook/rook" - "github.com/rook/rook/pkg/operator/cassandra/sidecar" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/util/flags" - "github.com/spf13/cobra" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apiserver/pkg/server" - kubeinformers "k8s.io/client-go/informers" - "k8s.io/client-go/informers/internalinterfaces" -) - -var sidecarCmd = &cobra.Command{ - Use: "sidecar", - Short: "Runs the cassandra sidecar to deploy and manage cassandra in Kubernetes", - Long: `Runs the cassandra sidecar to deploy and manage cassandra in kubernetes clusters. -https://github.com/rook/rook`, -} - -func init() { - flags.SetFlagsFromEnv(operatorCmd.Flags(), rook.RookEnvVarPrefix) - - sidecarCmd.RunE = startSidecar -} - -func startSidecar(cmd *cobra.Command, args []string) error { - rook.SetLogLevel() - rook.LogStartupInfo(operatorCmd.Flags()) - - context := rook.NewContext() - kubeClient := context.Clientset - rookClient := context.RookClientset - - podName := os.Getenv(k8sutil.PodNameEnvVar) - if podName == "" { - rook.TerminateFatal(fmt.Errorf("cannot detect the pod name. Please provide it using the downward API in the manifest file")) - } - podNamespace := os.Getenv(k8sutil.PodNamespaceEnvVar) - if podNamespace == "" { - rook.TerminateFatal(fmt.Errorf("cannot detect the pod namespace. Please provide it using the downward API in the manifest file")) - } - - // This func will make our informer only watch resources with the name of our member - tweakListOptionsFunc := internalinterfaces.TweakListOptionsFunc( - func(options *metav1.ListOptions) { - options.FieldSelector = fmt.Sprintf("metadata.name=%s", podName) - }, - ) - - // kubeInformerFactory watches resources with: - // namespace: podNamespace - // name: podName - kubeInformerFactory := kubeinformers.NewSharedInformerFactoryWithOptions( - kubeClient, - resyncPeriod, - kubeinformers.WithNamespace(podNamespace), - kubeinformers.WithTweakListOptions(tweakListOptionsFunc), - ) - - mc, err := sidecar.New( - podName, - podNamespace, - kubeClient, - rookClient, - kubeInformerFactory.Core().V1().Services(), - ) - - if err != nil { - rook.TerminateFatal(fmt.Errorf("failed to initialize member controller: %s", err.Error())) - } - logger.Infof("Initialized Member Controller: %+v", mc) - - // Create a channel to receive OS signals - stopCh := server.SetupSignalHandler() - go kubeInformerFactory.Start(stopCh) - - // Start the controller loop - logger.Infof("Starting rook sidecar for Cassandra.") - if err = mc.Run(1, stopCh); err != nil { - logger.Fatalf("Error running sidecar: %s", err.Error()) - } - - return nil -} diff --git a/cmd/rook/ceph/operator.go b/cmd/rook/ceph/operator.go index d8979b52b5c6..41765b1b38dd 100644 --- a/cmd/rook/ceph/operator.go +++ b/cmd/rook/ceph/operator.go @@ -23,7 +23,6 @@ import ( "github.com/rook/rook/pkg/daemon/ceph/agent/flexvolume/attachment" operator "github.com/rook/rook/pkg/operator/ceph" cluster "github.com/rook/rook/pkg/operator/ceph/cluster" - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" "github.com/rook/rook/pkg/operator/ceph/csi" opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" @@ -44,9 +43,6 @@ https://github.com/rook/rook`, } func init() { - operatorCmd.Flags().DurationVar(&mon.HealthCheckInterval, "mon-healthcheck-interval", mon.HealthCheckInterval, "mon health check interval (duration)") - operatorCmd.Flags().DurationVar(&mon.MonOutTimeout, "mon-out-timeout", mon.MonOutTimeout, 
"mon out timeout (duration)") - // csi deployment templates operatorCmd.Flags().StringVar(&csi.RBDPluginTemplatePath, "csi-rbd-plugin-template-path", csi.DefaultRBDPluginTemplatePath, "path to ceph-csi rbd plugin template") diff --git a/cmd/rook/ceph/osd.go b/cmd/rook/ceph/osd.go index 0cdeb8a5e8f6..3e20f3a72e9e 100644 --- a/cmd/rook/ceph/osd.go +++ b/cmd/rook/ceph/osd.go @@ -70,6 +70,7 @@ var ( blockPath string lvBackedPV bool osdIDsToRemove string + preservePVC bool ) func addOSDFlags(command *cobra.Command) { @@ -98,6 +99,7 @@ func addOSDFlags(command *cobra.Command) { // flags for removing OSDs that are unhealthy or otherwise should be purged from the cluster osdRemoveCmd.Flags().StringVar(&osdIDsToRemove, "osd-ids", "", "OSD IDs to remove from the cluster") + osdRemoveCmd.Flags().BoolVar(&preservePVC, "preserve-pvc", false, "Whether PVCs for OSDs will be deleted") // add the subcommands to the parent osd command osdCmd.AddCommand(osdConfigCmd, @@ -236,7 +238,7 @@ func prepareOSD(cmd *cobra.Command, args []string) error { Message: err.Error(), PvcBackedOSD: cfg.pvcBacked, } - oposd.UpdateNodeStatus(kv, cfg.nodeName, status) + oposd.UpdateNodeOrPVCStatus(kv, cfg.nodeName, status) rook.TerminateFatal(err) } @@ -260,7 +262,7 @@ func removeOSDs(cmd *cobra.Command, args []string) error { context := createContext() // Run OSD remove sequence - err := osddaemon.RemoveOSDs(context, &clusterInfo, strings.Split(osdIDsToRemove, ",")) + err := osddaemon.RemoveOSDs(context, &clusterInfo, strings.Split(osdIDsToRemove, ","), preservePVC) if err != nil { rook.TerminateFatal(err) } diff --git a/cmd/rook/main.go b/cmd/rook/main.go index 2d7699d4b941..bebdf07fcf2d 100644 --- a/cmd/rook/main.go +++ b/cmd/rook/main.go @@ -18,9 +18,7 @@ package main import ( "fmt" - "github.com/rook/rook/cmd/rook/cassandra" "github.com/rook/rook/cmd/rook/ceph" - "github.com/rook/rook/cmd/rook/nfs" rook "github.com/rook/rook/cmd/rook/rook" "github.com/rook/rook/cmd/rook/util" "github.com/rook/rook/cmd/rook/version" @@ -39,8 +37,6 @@ func addCommands() { discoverCmd, // backend commands ceph.Cmd, - nfs.Cmd, - cassandra.Cmd, // util commands util.CmdReporterCmd, diff --git a/cmd/rook/nfs/nfs.go b/cmd/rook/nfs/nfs.go deleted file mode 100644 index 8c5057757118..000000000000 --- a/cmd/rook/nfs/nfs.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package nfs - -import ( - "github.com/coreos/pkg/capnslog" - "github.com/spf13/cobra" -) - -var Cmd = &cobra.Command{ - Use: "nfs", - Short: "Main command for NFS operator and daemons.", -} - -var ( - logger = capnslog.NewPackageLogger("github.com/rook/rook", "nfscmd") -) - -func init() { - Cmd.AddCommand(operatorCmd) - Cmd.AddCommand(webhookCmd) - Cmd.AddCommand(provisonerCmd) - Cmd.AddCommand(serverCmd) -} diff --git a/cmd/rook/nfs/operator.go b/cmd/rook/nfs/operator.go deleted file mode 100644 index 2d6e8d0a3d79..000000000000 --- a/cmd/rook/nfs/operator.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nfs - -import ( - "github.com/rook/rook/cmd/rook/rook" - operator "github.com/rook/rook/pkg/operator/nfs" - "github.com/rook/rook/pkg/util/flags" - "github.com/spf13/cobra" -) - -var operatorCmd = &cobra.Command{ - Use: "operator", - Short: "Runs the NFS operator to deploy and manage NFS server in kubernetes clusters", - Long: `Runs the NFS operator to deploy and manage NFS server in kubernetes clusters. -https://github.com/rook/rook`, -} - -func init() { - flags.SetFlagsFromEnv(operatorCmd.Flags(), rook.RookEnvVarPrefix) - flags.SetLoggingFlags(operatorCmd.Flags()) - - operatorCmd.RunE = startOperator -} - -func startOperator(cmd *cobra.Command, args []string) error { - rook.SetLogLevel() - rook.LogStartupInfo(operatorCmd.Flags()) - - logger.Infof("starting NFS operator") - context := rook.NewContext() - op := operator.New(context) - err := op.Run() - rook.TerminateOnError(err, "failed to run operator") - - return nil -} diff --git a/cmd/rook/nfs/provisioner.go b/cmd/rook/nfs/provisioner.go deleted file mode 100644 index 49e41e3a8bf6..000000000000 --- a/cmd/rook/nfs/provisioner.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package nfs - -import ( - "context" - "errors" - - "github.com/rook/rook/cmd/rook/rook" - "github.com/rook/rook/pkg/operator/nfs" - "github.com/rook/rook/pkg/util/flags" - "github.com/spf13/cobra" - "sigs.k8s.io/sig-storage-lib-external-provisioner/v6/controller" -) - -var provisonerCmd = &cobra.Command{ - Use: "provisioner", - Short: "Runs the NFS provisioner for provisioning volumes", - Long: "Runs the NFS provisioner for provisioning volumes from the rook provisioned nfs servers", -} - -var ( - provisioner *string -) - -func init() { - flags.SetFlagsFromEnv(provisonerCmd.Flags(), rook.RookEnvVarPrefix) - flags.SetLoggingFlags(provisonerCmd.Flags()) - - provisioner = provisonerCmd.Flags().String("provisioner", "", "Name of the provisioner. The provisioner will only provision volumes for claims that request a StorageClass with a provisioner field set equal to this name.") - provisonerCmd.RunE = startProvisioner -} - -func startProvisioner(cmd *cobra.Command, args []string) error { - rook.SetLogLevel() - rook.LogStartupInfo(serverCmd.Flags()) - if len(*provisioner) == 0 { - return errors.New("--provisioner is a required parameter") - } - - rookContext := rook.NewContext() - clientset := rookContext.Clientset - rookClientset := rookContext.RookClientset - - serverVersion, err := clientset.Discovery().ServerVersion() - if err != nil { - logger.Fatalf("Error getting server version: %v", err) - } - - clientNFSProvisioner, err := nfs.NewNFSProvisioner(clientset, rookClientset) - if err != nil { - return err - } - - pc := controller.NewProvisionController(clientset, *provisioner, clientNFSProvisioner, serverVersion.GitVersion) - neverStopCtx := context.Background() - pc.Run(neverStopCtx) - return nil -} diff --git a/cmd/rook/nfs/server.go b/cmd/rook/nfs/server.go deleted file mode 100644 index 2e39e4c1a9e5..000000000000 --- a/cmd/rook/nfs/server.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nfs - -import ( - "errors" - - "github.com/rook/rook/cmd/rook/rook" - "github.com/rook/rook/pkg/operator/nfs" - "github.com/rook/rook/pkg/util/flags" - "github.com/spf13/cobra" -) - -var serverCmd = &cobra.Command{ - Use: "server", - Short: "Runs the NFS server to deploy and manage NFS server in kubernetes clusters", - Long: `Runs the NFS operator to deploy and manage NFS server in kubernetes clusters. 
-https://github.com/rook/rook`, -} - -var ( - ganeshaConfigPath *string -) - -func init() { - flags.SetFlagsFromEnv(serverCmd.Flags(), rook.RookEnvVarPrefix) - flags.SetLoggingFlags(serverCmd.Flags()) - - ganeshaConfigPath = serverCmd.Flags().String("ganeshaConfigPath", "", "ConfigPath of nfs ganesha") - - serverCmd.RunE = startServer -} - -func startServer(cmd *cobra.Command, args []string) error { - rook.SetLogLevel() - rook.LogStartupInfo(serverCmd.Flags()) - if len(*ganeshaConfigPath) == 0 { - return errors.New("--ganeshaConfigPath is a required parameter") - } - - logger.Infof("Setting up NFS server!") - - err := nfs.Setup(*ganeshaConfigPath) - if err != nil { - logger.Fatalf("Error setting up NFS server: %v", err) - } - - logger.Infof("starting NFS server") - // This blocks until server exits (presumably due to an error) - err = nfs.Run(*ganeshaConfigPath) - if err != nil { - logger.Errorf("NFS server Exited Unexpectedly with err: %v", err) - } - - return nil -} diff --git a/cmd/rook/nfs/webhook.go b/cmd/rook/nfs/webhook.go deleted file mode 100644 index f559ac74c71b..000000000000 --- a/cmd/rook/nfs/webhook.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nfs - -import ( - "github.com/rook/rook/cmd/rook/rook" - operator "github.com/rook/rook/pkg/operator/nfs" - "github.com/spf13/cobra" -) - -var ( - port int - certDir string -) - -var webhookCmd = &cobra.Command{ - Use: "webhook", - Short: "Runs the NFS webhook admission", -} - -func init() { - webhookCmd.Flags().IntVar(&port, "port", 9443, "port that the webhook server serves at") - webhookCmd.Flags().StringVar(&certDir, "cert-dir", "", "directory that contains the server key and certificate. if not set will use default controller-runtime wwebhook directory") - webhookCmd.RunE = startWebhook -} - -func startWebhook(cmd *cobra.Command, args []string) error { - rook.SetLogLevel() - rook.LogStartupInfo(webhookCmd.Flags()) - - logger.Infof("starting NFS webhook") - webhook := operator.NewWebhook(port, certDir) - err := webhook.Run() - rook.TerminateOnError(err, "failed to run wbhook") - - return nil -} diff --git a/design/cassandra/cluster-creation.md b/design/cassandra/cluster-creation.md deleted file mode 100644 index 93dcc863b101..000000000000 --- a/design/cassandra/cluster-creation.md +++ /dev/null @@ -1,214 +0,0 @@ -# Cluster Creation Design Doc - -In this document, we outline the procedure we need to follow to bootstrap a new Cassandra Cluster from scratch. We also explain and evaluate any decisions we had to take. - -## Sequencing of Events - -![cluster-creation-sequence-diagram](media/cluster-creation-sequence-diagram.png) - - -Explanation: - -1. 
**User** creates an instance of Cluster CRD, ie: - -``` yaml -apiVersion: "cassandra.rook.io/v1alpha1" -kind: "Cluster" -metadata: - name: "my-cassandra-cluster" -spec: - version: "3.1.11" - # Optional: repository overrides the default image repo - repository: "custom-enterprise-repo.io/cassandra" - # Optional: what database to expect in the image - mode: cassandra | scylla - dataCenter: - name: "us-east-1" - racks: - - name: "us-east-1c" - instances: 3 - # Optional: configMapName references a user's custom configuration for - # a specific Cassandra Rack - configMapName: "cassandra-config" - # Optional: configMapName with single jmx_exporter_config.yaml file - # reference a custom jmx prometheus exporter configuration for CassandraRack - jmxExporterConfigMapName: "jmx-prometheus-config" - # Rook Common Type: StorageSpec - storage: - volumeClaims: - - storageClassName: - metadata: - name: "cassandra-data" - spec: - accessModes: ["ReadWriteOnce"] - storageClassName: default - resources: - requests: - storage: "500Gi" - # Rook Common Type: PlacementSpec - # Optional: Placement declares node/pod (anti)affinity - placement: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: failure-domain.beta.kubernetes.io/region - operator: In - values: - - us-east-1 - - key: failure-domain.beta.kubernetes.io/zone - operator: In - values: - - us-east-1c - # Rook Common Type: ResourceSpec - # Resources declares the CPU,RAM resources of a single instance of the Rack. - resources: - requests: - cpu: "2000m" - memory: "4Gi" - limits: - cpu: "2000m" - memory: "4Gi" -``` - -2. **Controller** is informed of a new Cluster CRD instance. For each (dc,rack) it creates: - 1. A StatefulSet (see [Appendix A](#appendix-a)) - 2. A Headless Service that clients will use to connect to ready members of the database. - 3. ClusterIP Services that serve as static IPs for Pods. Labeled accordingly if they are seeds. Each Service is named after the Pod that uses it. - -3. **StatefulSet Pod** starts and it starts the init container: - 1. Init container starts and copies the rook binary and other necessary files (like plugins) to the shared volume `/mnt/shared/`, then exits. - 2. The Cassandra container starts with the rook binary (sidecar) as its entrypoint. - 3. Sidecar starts and edits config files with custom values applying to Kubernetes (see [Appendix B](#appendix-b)). - 4. Sidecar starts the Cassandra process. - - -## Design Decisions - -* Seeds: for the time being, 2 members from each rack will serve as seeds. This provides good fault-tolerance without sacrificing performance. - -* Sidecar and Cassandra run in the same container. This provides the following advantages: - 1. Sidecar has direct access to the filesystem of the Cassandra instance, in order to edit config files in-place. - 2. Separate containers means users need to define cpu and ram requests for the sidecar. This fragments the resources of a Node and provides a bad UX (ie if a Node has 16 cores, you want to give 16 to Cassandra, not 15.9 to Cassandra and 0.1 to the Sidecar). It also doesn't integrate well with the [CPU Manager](https://kubernetes.io/blog/2018/07/24/feature-highlight-cpu-manager/) feature for cpu affinity, that users may want to use for additional performance. 
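To make the per-member Service idea in the design notes above concrete, here is a minimal Go sketch of how such a ClusterIP Service could be assembled with client-go types. The `memberService` helper, the pod-name selector, and the single `intra-node` port are illustrative assumptions; only the `cassandra.rook.io/*` label keys come from the design text.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// memberService builds the per-member ClusterIP Service: named after the Pod,
// carrying the cluster/datacenter/rack labels, and marked as a seed when asked.
func memberService(podName, namespace, cluster, dc, rack string, seed bool) *corev1.Service {
	labels := map[string]string{
		"cassandra.rook.io/cluster":    cluster,
		"cassandra.rook.io/datacenter": dc,
		"cassandra.rook.io/rack":       rack,
	}
	if seed {
		labels["cassandra.rook.io/seed"] = "true"
	}
	return &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      podName, // each Service is named after the Pod that uses it
			Namespace: namespace,
			Labels:    labels,
		},
		Spec: corev1.ServiceSpec{
			Type: corev1.ServiceTypeClusterIP,
			// the StatefulSet controller labels every Pod with its own name,
			// which lets one Service select exactly one member
			Selector: map[string]string{"statefulset.kubernetes.io/pod-name": podName},
			Ports:    []corev1.ServicePort{{Name: "intra-node", Port: 7000}},
		},
	}
}

func main() {
	svc := memberService("my-cassandra-cluster-us-east-1-us-east-1c-0",
		"rook-cassandra", "my-cassandra-cluster", "us-east-1", "us-east-1c", true)
	fmt.Println(svc.Name, svc.Labels)
}
```

Naming the Service after the Pod keeps the mapping between a member's identity and its stable address trivial to look up.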
- -## Appendices - -### Appendix A - -The StatefulSet that will be created by the controller is more or less the following: - -``` yaml -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: - namespace: - labels: - # Kubernetes recommended labels - app.kubernetes.io/name: cassandra - app.kubernetes.io/managed-by: rook - # Rook operator labels - cassandra.rook.io/cluster: - cassandra.rook.io/datacenter: - cassandra.rook.io/rack: -spec: - replicas: - serviceName: -hs - selector: - matchLabels: - cassandra.rook.io/cluster: - cassandra.rook.io/datacenter: - cassandra.rook.io/rack: - template: - metadata: - labels: - cassandra.rook.io/cluster: - cassandra.rook.io/datacenter: - cassandra.rook.io/rack: - spec: - volumes: - - name: shared - emptyDir: {} - initContainers: - - name: init - image: rook/rook-cassandra - imagePullPolicy: Always - command: - - "cp" - - "-a" - - "/sidecar/* /mnt/shared/" - volumeMounts: - - name: shared - mountPath: /mnt/shared - containers: - - name: cassandra - image: ":" - imagePullPolicy: IfNotPresent - command: - - "/mnt/shared/rook" - - "cassandra" - - "sidecar" - ports: - - containerPort: 7000 - name: intra-node - - containerPort: 7001 - name: tls-intra-node - - containerPort: 7199 - name: jmx - - containerPort: 9042 - name: cql - - containerPort: 9160 - name: thrift - env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - resources: - limits: - cpu: - memory: - requests: - cpu: - memory: - volumeMounts: - - name: -data - mountPath: /var/lib/cassandra - - name: shared - mountPath: /mnt/shared - readOnly: true - volumeClaimTemplates: - - metadata: - name: -data - spec: - storageClassName: - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: -``` - - - -### Appendix B - -The options that are of interest to the operator are: - -#### cassandra.yaml - -| Option | Description | Our Value | - --- | --- | --- -| `cluster_name` | | name from Cluster CRD metadata | -| `listen_address` | The IP address or hostname that Cassandra binds to for connecting to this node. | Pod.IP | -| `broadcast_address` | The "public" IP address this node uses to broadcast to other nodes outside the network or across regions. | `ClusterIP` Service's Virtual IP | -| `rpc_address` | The listen address for client connections | Pod.IP | -| `broadcast_rpc_address` | RPC address to broadcast to drivers and other Cassandra nodes | A publicly accessible IP, obtained via an Ingress. Otherwise, same as `broadcast_address`. | -| `endpoint_snitch` | Cassandra uses the snitch to locate nodes and route requests. | GossipingPropertyFileSnitch. Also need to pass DC, RACK values to container. | -| `seed_provider` | The addresses of hosts designated as contact points in the cluster. A joining node contacts one of the nodes in the -seeds list to learn the topology of the ring. | Controller will label Pods that will serve as seeds with a `cassandra.rook.io/seed` label. Then, when the Sidecar is setting up the ClusterIP service, it will label it accordingly. To get the seeds for a cluster, you need to query for services with `cassandra.rook.io/seed`, `cassandra.rook.io/cluster` labels set accordingly. If we could develop a custom SeedProvider that queries the Kubernetes API, that would be great. For starters, we can have the sidecar retrieve them and provide them through the --seeds list. | - -#### cassandra-env.sh - -1. Load Jolokia for JMX <--> HTTP communication. 
-```bash -JVM_OPTS="$JVM_OPTS -javaagent:/mnt/shared/plugins/jolokia.jar=port=,host=" -``` diff --git a/design/cassandra/design.md b/design/cassandra/design.md deleted file mode 100644 index 35acb4a11f36..000000000000 --- a/design/cassandra/design.md +++ /dev/null @@ -1,181 +0,0 @@ -# Kubernetes Operator for Apache Cassandra - -## Why ? - -* Cassandra is one of the most popular NoSQL databases. -* Peer-to-Peer write-anywhere design for very high write and read throughput. -* Despite those advantages, maintaining a Cassandra cluster is a highly manual and tiring task, which requires extensive familiarity with Cassandra's internals. -* An operator will automate all the manual interventions that a human administrator normally has to perform and provide a production-ready database out-of-the-box. - -## Features - -The operator will provide all the important features of Cassandra while exposing a user-friendly declarative UI, in accordance to Kubernetes principles. -More specifically, those features include: - -1. Cluster Creation -2. (Auto)Scaling -3. Failed Cassandra Node Replacement / Recover from data loss -4. Schedule Node Repair -5. Backup/Restore -6. Metrics endpoint for Kubernetes - -Since [Scylla](https://www.scylladb.com/) uses the same interfaces and protocols as Cassandra and offers significantly better performance, the operator will also try to support it. This effort will continue as long as the interfaces remain the same and code works for both databases with very minimal changes. If Scylla start to differ significantly, it should be broken into its own operator. This is not expected though, as it is supposed to be a drop-in replacement of Cassandra. - -## Current Approaches - -### Operator-Based Approaches - -* [cassandra-operator](https://github.com/instaclustr/cassandra-operator) by [Instaclustr](https://www.instaclustr.com/): This is indeed a very promising project, coming from a company that offers managed Cassandra deployments. It also utilizes a sidecar, which accepts commands through HTTP endpoints. It is written in Java, so it misses on the more advanced functionality of client-go. Also I don't know how many people are out there developing Kubernetes operators in other than Golang languages, so that may be a barrier for the community the operator will attract. They also do not support different racks. - * *Phase:* Alpha - * *License:* Apache 2.0 - -* [navigator](https://github.com/jetstack/navigator) by [Jetstack](https://www.jetstack.io/): Navigator is aspiring to be a managed DBaaS platform on top of Kubernetes. Their model they promote is very similar to the one proposed here (CRD + Controller + Sidecar) and has been an inspiration while designing the model for Cassandra Operator. They currently provide operators for Cassandra and Elasticsearch. - * *Phase:* Alpha - * *License:* Apache 2.0 - -### Vanilla Approaches - -* Kubernetes StatefulSets: this approach uses the out-of-the-box features of StatefulSets to run a Cassandra cluster. This usually works fine, until something goes wrong, like a node failure. Also, it is very limited in how much it can be extended for more advanced uses, like node replacement, backups, restores, monitoring, etc. -* Helm: this approach encapsulates much of the complexity of the vanilla approach, offering a better UX. However, it suffers from the same caveats. - -## Design - -### Goals - -The operator should: -* Be level-based in the Kubernetes sense, immune from edge-case race-condition scenarios. 
-* Provide a UX consistent with the [Kuberneter API Conventions](https://github.com/kubernetes/community/blob/e8dbd18a193795bee952ba98c0c5529e880050f9/contributors/devel/api-conventions.md) as well as rook. -* Leverage the existing Kubernetes API Objects to offload as much work as possible. We don't want to reinvent the wheel. -* Provide an all-in-one production-ready solution to run Cassandra on Kubernetes. -* Allow for easy manual intervention when needed. - - -### Overview - -* This operator will use the pattern: `CRD` + `Controller` + `Sidecar` - -![operator-design-overview](media/operator_overview.jpg) - -### Sidecar - -In Cassandra, many actions require access to the cassandra process or the underlying system. More specifically: - -* *Cluster Creation:* dynamically insert java classes and other preparations for Cassandra to run. -* *Cassandra Node Replacement:* requires editing config files. -* *Backup, Monitoring:* requires talking to a JMX interface that the Cassandra process exposes. - -By running a sidecar alongside Cassandra, we can monitor the process closely and deal with complex failure scenarios. -For more information on how the sidecar works, please see the [sidecar design doc](sidecar.md) - -### Cassandra-Kubernetes Mapping - -* The design of the Cassandra CRD will follow the Cassandra terminology as much as possible, in order to be familiar to existing users of Cassandra and not confuse new users studying the Cassandra docs. - -* Cassandra abstracts resources in the following order: -`Cluster` -> `Datacenter` -> `Rack` -> `Node` - -* We map those abstractions to Kubernetes in the following way: - -| Cassandra | Kubernetes | -| :----: | :----: | -| Cluster | Cluster CRD | -| Datacenter | Pool of StatefulSets | -| Rack | StatefulSet | -| Node | Pod | - -### Cassandra Cluster CRD - -An example CRD would look something like this: - -``` yaml -apiVersion: "cassandra.rook.io/v1alpha1" -kind: "Cluster" -metadata: - name: "my-cassandra-cluster" -spec: - version: "3.1.11" - # Optional: repository overrides the default image repo - repository: "custom-enterprise-repo.io/cassandra" - dataCenter: - name: "us-east-1" - racks: - - name: "us-east-1c" - members: 3 - # Optional: configMapName references a user's custom configuration for - # a specific Cassandra Rack - configMapName: "cassandra-config" - # Rook Common Type: StorageSpec - storage: - volumeClaimTemplates: - - storageClassName: - metadata: - name: "cassandra-data" - spec: - accessModes: ["ReadWriteOnce"] - storageClassName: default - resources: - requests: - storage: "500Gi" - # Rook Common Type: PlacementSpec - # Optional: Placement declares node/pod (anti)affinity - placement: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: failure-domain.beta.kubernetes.io/region - operator: In - values: - - us-east-1 - - key: failure-domain.beta.kubernetes.io/zone - operator: In - values: - - us-east-1c - # Rook Common Type: ResourceSpec - # Resources declares the CPU,RAM resources of a single instance of the Rack. - resources: - requests: - cpu: "2000m" - memory: "4Gi" - limits: - cpu: "2000m" - memory: "4Gi" - # Optional: sidecarImage overwrites the default image used - sidecarImage: - repository: "rook.io/sidecar-cassandra" - tag: "0.1" -``` - -* The operator will create a StatefulSet for every Rack in each Datacenter. -* In practice, Datacenter is usually mapped to a Region and Rack to an Availability Zone. 
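The mapping table and the example CRD above translate naturally into Go API types. The sketch below is illustrative only (field set trimmed, struct and helper names assumed); it is not the operator's actual generated API package, but it shows the Rack-to-StatefulSet mapping the text describes.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Cluster mirrors the example CRD: a cluster holds one datacenter, a
// datacenter holds racks, and every rack maps to its own StatefulSet.
type Cluster struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Spec              ClusterSpec `json:"spec"`
}

type ClusterSpec struct {
	Version    string         `json:"version"`
	Repository string         `json:"repository,omitempty"`
	DataCenter DataCenterSpec `json:"dataCenter"`
}

type DataCenterSpec struct {
	Name  string     `json:"name"`
	Racks []RackSpec `json:"racks"`
}

type RackSpec struct {
	Name          string                      `json:"name"`
	Members       int32                       `json:"members"`
	ConfigMapName string                      `json:"configMapName,omitempty"`
	Resources     corev1.ResourceRequirements `json:"resources,omitempty"`
}

// statefulSetName spells out the Rack -> StatefulSet mapping: one workload
// per (cluster, datacenter, rack) tuple.
func statefulSetName(c *Cluster, rack RackSpec) string {
	return c.Name + "-" + c.Spec.DataCenter.Name + "-" + rack.Name
}

func main() {
	c := &Cluster{
		ObjectMeta: metav1.ObjectMeta{Name: "my-cassandra-cluster"},
		Spec: ClusterSpec{
			Version: "3.1.11",
			DataCenter: DataCenterSpec{
				Name:  "us-east-1",
				Racks: []RackSpec{{Name: "us-east-1c", Members: 3}},
			},
		},
	}
	fmt.Println(statefulSetName(c, c.Spec.DataCenter.Racks[0]))
}
```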
- -### Major Pain Point: Stable Pod Identity - -A problem that every approach so far has, is dealing with loss of persistence. -If a Cassandra instance loses its data (ie the underlying node fails, the disk dies). -The replacing instance has to provide the IP of the instance it's replacing. Cassandra doesn't support stable hostnames as an identification method. -However, IPs on Kubernetes are ephemeral and that leaves us with 2 options: - -* **Bookkeeping:** we do some kind of bookkeeping on Host-IDs (UUIDs uniquely identifying Cassandra instances) or IP addresses. This method has 2 major drawbacks: - 1. Complexity - 2. Subject to race-conditions in edge-case scenarios: since we don't know the instance's identity from the beginning, we have to query the instance to get it. If the instance loses its data (ie due to underlying node fail), then we have no way of knowing the identity of the instance. -* **Sticky IPs Workaround / Service per Instance:** we create a `ClusterIP` Service for each Cassandra instance(Pod). This essentially provides us with a stable IP address for each instance. Since Cassandra uses IP addresses as the means of identification, this means that we don't need to do any bookkeeping. Also, we are immune from race-conditions, as we know our instance's identity beforehand. Some sceptical thoughts about this method: - 1. Feels/Seems hacky - 2. Uses too many services: most clusters have a /12 IP block to utilize, so we do not expect them to run out of IPs soon. - 3. Performance Issues: since the default implementation of ClusterIPs is iptables this could be an issue with clusters in the 100s/1000s. However, an IPVS implementation of ClusterIP is [GA in 1.11](https://kubernetes.io/blog/2018/07/09/ipvs-based-in-cluster-load-balancing-deep-dive/) and works well even with 1000s of Services. - - -Based on the above, in accordance to our goals, we choose to implement the operator using the Service Per Instance approach. - -## Example Roadmap - -* This may be subject to change. - -1. Cluster Creation -2. Scaling Up -3. Scaling Down -4. Metrics Endpoint for Prometheus -5. Recover from data loss -6. Schedule Node Repair -7. Backup -8. Restore diff --git a/design/cassandra/media/cluster-creation-sequence-diagram.png b/design/cassandra/media/cluster-creation-sequence-diagram.png deleted file mode 100644 index 785e29c989ef..000000000000 Binary files a/design/cassandra/media/cluster-creation-sequence-diagram.png and /dev/null differ diff --git a/design/cassandra/media/operator_overview.jpg b/design/cassandra/media/operator_overview.jpg deleted file mode 100644 index d005661acb77..000000000000 Binary files a/design/cassandra/media/operator_overview.jpg and /dev/null differ diff --git a/design/cassandra/media/scale_down.png b/design/cassandra/media/scale_down.png deleted file mode 100644 index c30c6d73f969..000000000000 Binary files a/design/cassandra/media/scale_down.png and /dev/null differ diff --git a/design/cassandra/scale_down.md b/design/cassandra/scale_down.md deleted file mode 100644 index 8a4a3ecb6cd8..000000000000 --- a/design/cassandra/scale_down.md +++ /dev/null @@ -1,44 +0,0 @@ -## Cassandra Scale Down - -Overview of the actions we need to scale down a Cassandra Cluster. -Scaling down a Cassandra Cluster is a two part procedure: -1. Issue a `decommission` command on the instance that will shut down. Once the instance receives this command, it will stream its data to the other instances and then shut down permanently. 
This process can be very lengthy, especially for large datasets. -2. Once the instance has decommissioned, the StatefulSet can be safely scaled down. The PVC of the deleted Pod will remain (default behaviour of StatefulSet), so we need to clean it up or it may cause problems in the future (if the StatefulSet scales up again, the new Pod will get bound to the old PVC). - -### Background - -In Cassandra operator, each Pod has a corresponding ClusterIP Service, that serves as a static IP and thus its identity. We also use labels on those objects to communicate intent in our operator. For example, the `cassandra.rook.io/seed` label communicates that the instance is a seed. - -For database management and operations, Cassandra uses a Java RPC Interface (JMX). Since Go can't talk to JMX, we use an HTTP<->JMX bridge, Jolokia. This way, we can operate Cassandra though HTTP calls. - -### Algorithm - -With that in mind, the proposed algorithm is: - -* Phase 1 -Operator: -1. Detect requested scale down (`Rack[i].Spec.Members` < `RackStatus.Members`) -2. Add label `cassandra.rook.io/decommissioned` to the ClusterIP Service of the last pod of the StatefulSet. This serves as the record of intent to decommission that instance. -Sidecar: -3. Detect the `cassandra.rook.io/decommissioned` label on the ClusterIP Service Object. -4. Run `nodetool decommission` on the instance. - - -* Phase 2 -Sidecar: -1. Confirm that `decommission` has completed by running `nodetool status` on another instance and confirming its own ip is no longer in the Cluster State. -2. Update label to `cassandra.rook.io/decommissioned: true` -Operator: -3. Detect label change and scale down the StatefulSet. -4. Delete PVC of the now-deleted Pod. - -![scale_down_diagram](media/scale_down.png) - -### Security - -* In order to get the status of the Cassandra cluster from a remote instance, we need to expose the Jolokia HTTP Server outside the local instance. This is a security concern, since it includes powerful management capabilities. To secure it, we will use HTTPS with client certificates. All servers will use the same private key, which will be created as a Secret by the operator and mounted on the Pods. Certificates will be self-signed, also by the same private key. This simplifies things and also provides reasonable security. To hack this setup, one would need to gain access to the Secret. - -### Alternatives - -* `preStop` lifecycle hook: another option would be to have a `preStop` lifecycle hook which will issue the decommission command. The problem with that approach is that `preStop` hooks are best-effort. The Pod will be deleted even if the `preStop` hook fails. That makes it a bad fit for Cassandra, since we need to be absolutely sure that a Pod has decommissioned, otherwise unpredictable things will happen. - diff --git a/design/cassandra/sidecar.md b/design/cassandra/sidecar.md deleted file mode 100644 index bbf70d8643a4..000000000000 --- a/design/cassandra/sidecar.md +++ /dev/null @@ -1,58 +0,0 @@ -## Sidecar Design Proposal - -### Consideration: REST API - -When thinking about how our sidecar will communicate with our controller, a natural solution that comes to mind is though a REST API. The sidecar will run an HTTP Server which the other party will call. This is the approach used by [Netflix's Priam](https://github.com/Netflix/priam/wiki). - -However, this includes some extra complications. Remember that, according to our goals we are designing a level-based system. -First of all, some operations just take a long time. 
Backup, for example, might take hours to complete. -That means our operator must have an open TCP connection for all this time. If it gets interrupted, which we do expect to happen, this connection will be lost and we won't have any record that it ever happened. - - -This doesn't seem like the Kubernetes way of doing things. Consider this example, as one could think of the kubelet as a sidecar for Pods: - -* **Question:** Does the scheduler ping the kubelet each time it schedules a Pod and then wait for an answer? -* **Answer:** No, it writes the `nodeName` field on the PodSpec. In other words, it writes a record of intent. No matter how many times the kubelet or the scheduler crashes it doesn't matter. The record of intent is there. - -### Control-Loop Design - -* Based on our observations above, we design a method of communication in line with the Kubernetes philosophy. -* When the controller wants to invoke a functionality in a Sidecar, it should write a record of intent in the Kubernetes API Objects (etcd). The sidecar will be watching the Kubernetes API and responding accordingly. -* There are two approaches to represent the record of intent: - 1. **Labels:** - * When the controller wants to communicate with a sidecar, it will write a predefined label in the ClusterIP Service Object of the specific instance. For example, to communicate that we want an instance to decommission, we could write the label 'cassandra.rook.io/decommission`. The sidecar will see this and decommission the Cassandra instance. When it is done, it will change the label value to a predefined value. Then the controller will know to delete that instance. - * **Advantages:** - * Reuses Kubernetes built-in mechanisms - * Labels are query-able - * **Disadvantages:** - * Doesn't support nested fields - 2. **Member CRD** - * Each sidecar will watch an instance of a newly defined Member CRD and have its own `Spec` and `Status`. - * **Advantages:** - * More expressive and natural. Supports nested fields. - * Only our operator touches it. We don't expect it to happen often, but Pods are touched by pretty much everyone on the cluster. So if someone does stupid things, that affects us too. - * **Disadvantages:** - * Probably overkill to have for only a couple of fields. - * Induces an extra burden on etcd. - -### Decision - -* Given the above advantages and disadvantages of each approach, we will start implementing the Cassandra operator without the extra complexity of the Member Object. If in the process of developing it becomes clear that it is needed, we will add it then. - -### Example - -Let's consider the case of creating a new Cassandra Cluster. It will look something like this: - - -1. *User* creates CRD for a Cassandra Cluster. - -2. *Controller* sees the newly created CRD object and creates a StatefulSet for each Cassandra Rack and a ClusterIP Service for each member to serve as its static IP. Seed members have the label `cassandra.rook.io/seed` on their Service. - -3. *Cassandra* container starts and our custom entrypoint is entered. It waits for config files to be written to a predefined location (shared volume - emptyDir), then copies them to the correct location and starts. - -4. *Sidecar* starts, syncs with the Kubernetes API and gets its corresponding Service ClusterIP Object. - 1. Retrieve the static ip from `spec.clusteIP`. - 2. Get seed addresses by querying for the label `cassandra.rook.io/seed` in Services. - 3. Generate config files with our custom options and start Cassandra. 
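To make the label-based record of intent concrete, here is a minimal sketch of the operator-side check, assuming client-go, a hypothetical `rook-cassandra` namespace, and the `cassandra.rook.io/decommissioned` label discussed above; a real controller would react through informers rather than polling.

```go
package main

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// Label assumed to be flipped to "true" by the sidecar once `nodetool decommission` finishes.
const decommissionedLabel = "cassandra.rook.io/decommissioned"

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	for {
		// Find member Services whose sidecar reported a completed decommission.
		svcs, err := client.CoreV1().Services("rook-cassandra").List(context.TODO(),
			metav1.ListOptions{LabelSelector: decommissionedLabel + "=true"})
		if err != nil {
			fmt.Println("listing member services:", err)
		} else {
			for _, svc := range svcs.Items {
				// At this point the operator would scale down the StatefulSet
				// and delete the orphaned PVC of this member.
				fmt.Println("member safe to remove:", svc.Name)
			}
		}
		time.Sleep(30 * time.Second)
	}
}
```

The sidecar side is symmetric: it watches its own ClusterIP Service for the label and runs `nodetool decommission` when the label appears, then updates the label value when it is done.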
- - diff --git a/design/ceph/ceph-cluster-cleanup.md b/design/ceph/ceph-cluster-cleanup.md index 251373a128cb..4cc116dda919 100644 --- a/design/ceph/ceph-cluster-cleanup.md +++ b/design/ceph/ceph-cluster-cleanup.md @@ -34,7 +34,7 @@ metadata: namespace: rook-ceph spec: cephVersion: - image: quay.io/ceph/ceph:v16.2.5 + image: quay.io/ceph/ceph:v16.2.6 dataDirHostPath: /var/lib/rook mon: count: 3 diff --git a/design/ceph/object/store.md b/design/ceph/object/store.md index e3431644047d..bce028ff99b7 100644 --- a/design/ceph/object/store.md +++ b/design/ceph/object/store.md @@ -79,7 +79,16 @@ If there is a `zone` section in object-store configuration, then the pool sectio The gateway settings correspond to the RGW service. - `type`: Can be `s3`. In the future support for `swift` can be added. -- `sslCertificateRef`: If specified, this is the name of the Kubernetes secret that contains the SSL certificate to be used for secure connections to the object store. The secret must be in the same namespace as the Rook cluster. Rook will look in the secret provided at the `cert` key name. The value of the `cert` key must be in the format expected by the [RGW service](https://docs.ceph.com/docs/master/install/ceph-deploy/install-ceph-gateway/#using-ssl-with-civetweb): "The server key, server certificate, and any other CA or intermediate certificates be supplied in one file. Each of these items must be in pem form." If the certificate is not specified, SSL will not be configured. +- `sslCertificateRef`: If specified, this is the name of the Kubernetes secret that contains the SSL + certificate to be used for secure connections to the object store. The secret must be in the same + namespace as the Rook cluster. If it is an opaque Kubernetes Secret, Rook will look in the secret + provided at the `cert` key name. The value of the `cert` key must be in the format expected by the + [RGW + service](https://docs.ceph.com/docs/master/install/ceph-deploy/install-ceph-gateway/#using-ssl-with-civetweb): + "The server key, server certificate, and any other CA or intermediate certificates be supplied in + one file. Each of these items must be in pem form." If the certificate is not specified, SSL will + not be configured. +- `caBundleRef`: If specified, this is the name of the Kubernetes secret (type `opaque`) that contains ca-bundle to use. The secret must be in the same namespace as the Rook cluster. Rook will look in the secret provided at the `cabundle` key name. 
- `port`: The service port where the RGW service will be listening (http) - `securePort`: The service port where the RGW service will be listening (https) - `instances`: The number of RGW pods that will be started for this object store diff --git a/design/ceph/storage-class-device-set.md b/design/ceph/storage-class-device-set.md index faae67e98b05..b3221d40c4d9 100644 --- a/design/ceph/storage-class-device-set.md +++ b/design/ceph/storage-class-device-set.md @@ -206,7 +206,7 @@ spec: operator: In values: - cluster1 - topologyKey: "failure-domain.beta.kubernetes.io/zone" + topologyKey: "failure-domain.beta.kubernetes.io/zone" volumeClaimTemplates: - spec: resources: @@ -360,7 +360,7 @@ spec: operator: In values: - cluster1 - topologyKey: "failure-domain.beta.kubernetes.io/zone" + topologyKey: "failure-domain.beta.kubernetes.io/zone" volumeClaimTemplates: - spec: resources: @@ -384,7 +384,7 @@ spec: operator: In values: - cluster1 - topologyKey: "failure-domain.beta.kubernetes.io/zone" + topologyKey: "failure-domain.beta.kubernetes.io/zone" volumeClaimTemplates: - spec: resources: diff --git a/design/common/object-bucket.md b/design/common/object-bucket.md index b68c410dd018..db2bb118c497 100644 --- a/design/common/object-bucket.md +++ b/design/common/object-bucket.md @@ -97,7 +97,19 @@ The pools are the backing data store for the object store and are created with s The gateway settings correspond to the RGW service. - `type`: Can be `s3`. In the future support for `swift` can be added. -- `sslCertificateRef`: If specified, this is the name of the Kubernetes secret that contains the SSL certificate to be used for secure connections to the object store. The secret must be in the same namespace as the Rook cluster. Rook will look in the secret provided at the `cert` key name. The value of the `cert` key must be in the format expected by the [RGW service](https://docs.ceph.com/docs/master/install/ceph-deploy/install-ceph-gateway/#using-ssl-with-civetweb): "The server key, server certificate, and any other CA or intermediate certificates be supplied in one file. Each of these items must be in pem form." If the certificate is not specified, SSL will not be configured. +- `sslCertificateRef`: If specified, this is the name of the Kubernetes secret that contains the SSL + certificate to be used for secure connections to the object store. The secret must be in the same + namespace as the Rook cluster. If it is an opaque Kubernetes Secret, Rook will look in the secret provided at the `cert` key name. The + value of the `cert` key must be in the format expected by the [RGW + service](https://docs.ceph.com/docs/master/install/ceph-deploy/install-ceph-gateway/#using-ssl-with-civetweb): + "The server key, server certificate, and any other CA or intermediate certificates be supplied in + one file. Each of these items must be in pem form." If the certificate is not specified, SSL will + not be configured. There are scenarios where the certificate DNS is set for a particular domain + that does not include the local Kubernetes DNS, namely the object store DNS service endpoint. If + adding the service DNS name to the certificate is not possible, another key can be specified in the + secret's data: `insecureSkipVerify: true` to skip the certificate verification. It is not + recommended to enable this option since TLS is susceptible to machine-in-the-middle attacks unless + custom verification is used.
- `port`: The service port where the RGW service will be listening (http) - `securePort`: The service port where the RGW service will be listening (https) - `instances`: The number of RGW pods that will be started for this object store (ignored if allNodes=true) diff --git a/design/nfs/nfs-controller-runtime.md b/design/nfs/nfs-controller-runtime.md deleted file mode 100644 index d65ef10118ad..000000000000 --- a/design/nfs/nfs-controller-runtime.md +++ /dev/null @@ -1,181 +0,0 @@ -# Implement controller-runtime in Rook NFS Operator - -## Background - -This proposal is to implement controller-runtime in Rook NFS Operator to improve reliability of the operator itself. Currently, Rook nfs-operator only simply watches an event of CustomResource from an informer using simple [WatchCR][rook-watchcr] method which has limited functionality such as event can not be re-queued if failed. To implement controller-runtime is expected to overcome the shortcomings of current implementation. - -## Why controller-runtime? - -[Controller-runtime][controller-runtime] is widely used for writing Kubernetes operators. It is also leveraged by Kubebuilder and Operator SDK. Controller-runtime consists of several packages that have their respective responsibilities in building operators. The main function of controller-runtime is - -- **Manager:** Runnable for the operator with leader election option. It is also provides shared dependencies such as clients, caches, schemes, etc. -- **Controller:** Provides types and functions for building Controllers which ensure for any given object, the actual state matches the desired state which called `Reconciling` process. -- **Admission Webhook:** Provides methods to build an admission webhook (both Mutating Admission Webhook and Validating Admission Webhook) and bootstrap a webhook server. -- **Envtest:** Provides libraries for integration testing by starting a local control plane (etcd and kube-apiserver). -- **Matrics:** Provides metrics utility for controller. - -## Implementation - -The implementation of this proposal is to rewrite NFS Operator controller to use controller-runtime and introduce the validation admission webhook using controller-runtime for NFS Operator. - -### Controller & Reconciliation - -Operators are Kubernetes extensions that use custom resources to manage applications and their components using the Kubernetes APIs and kubectl tooling. Operators follow the Kubernetes controller principles. The process in which the actual state of the object (both cluster object and external object) will be matching the desired state which called *Reconciliation* process in the controller-runtime. - -The current implementation is the operator watch an event (create, update and delete) of CustomResource and will be handled by registered function in `ResourceEventHandlerFuncs` which every event has its own handler but only the create handler that implemented. - -Controller-runtime introduces an interface called [Reconciler][Controller-runtime-reconciler] that will ensure the state of the system matches what is specified by the user in the object at the time the Reconciler is called. Reconciler responds to generic events so it will contain all of the business logic of a Controller (create, update, and delete). What have to do here is only to implement the [Reconcile][Controller-runtime-reconcile] method of the interface in the controller. 
The controller-runtime also have utility functions for creating and updating an object called [CreateOrUpdate][controller-runtime-createorupdate] which will make easier to handling update of an object. - -Since the implementation controller using controller-runtime only changes the logic of the controller, so the deployment process will be like current implementation. However, the deployment process of admission webhook using controller-runtime will have additional steps as explained below. - -### Validation - -CustomResource validation in the operator can be done through the Controller itself. However, the operator pattern has two common types to validate the CustomResource. - -- **Syntactic validation** By defining OpenAPI validation rules. -- **Semantic Validation** By creating ValidatingAdmissionConfiguration and Admission Webhook. - -The current implementation only validates the CustomResource in the controller and just gives an error log in the operator stdout if the given resource is invalid. In this implementation will also cover the CustomResouce validation both though *Syntactic validation* and *Semantic Validation* and also give an improvement validation in the controller. - -![validation-webhook-flow](../../Documentation/media/nfs-webhook-validation-flow.png "Validation Webhook Flow") - -To implement *Syntactic validation* is only by defining OpenAPI validation rules. Otherwise, the *Semantic Validation* implementation is a bit more complicated. Fortunately, controller-runtime provides an awesome package that helpfully to create admission webhook such as bootstraping webhook server, registering handler, etc. Just like controller that have [Reconciler][controller-runtime-reconciler] interface, admission webhook in controller-runtime also have [Validator][controller-runtime-validator] interface that handle the operations validation. - -> Controller-runtime also provide [Defaulter][controller-runtime-defaulter] interface to handle mutation webhook. - -Since the webhook server must be served through TLS, a valid TLS certificate will be required. In this case, we can depend on [cert-manager][cert-manager]. The cert-manager component can be deployed as usual [cert-manager-installation](cert-manager-installation) no matter which namespace the cert-manager component lives. But keep in mind that *Certificate* must be in the same namespace as webhook-server. - -![validation-webhook-deployment](../../Documentation/media/nfs-webhook-deployment.png "Validation Webhook Deployment") - -Example self signed certificate. 
- -```yaml ---- -apiVersion: cert-manager.io/v1alpha2 -kind: Certificate -metadata: - name: rook-nfs-webhook-cert - namespace: rook-nfs-system -spec: - dnsNames: - - rook-nfs-webhook.rook-nfs-system.svc - - rook-nfs-webhook.rook-nfs-system.svc.cluster.local - issuerRef: - kind: Issuer - name: rook-nfs-selfsigned-issuer - secretName: rook-nfs-webhook-cert ---- -apiVersion: cert-manager.io/v1alpha2 -kind: Issuer -metadata: - name: rook-nfs-selfsigned-issuer - namespace: rook-nfs-system -spec: - selfSigned: {} -``` - -And the ValidatingAdmissionConfiguration will look like - -```yaml ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: ValidatingWebhookConfiguration -metadata: - annotations: - cert-manager.io/inject-ca-from: rook-nfs-system/rook-nfs-webhook-cert - creationTimestamp: null - name: rook-nfs-validating-webhook-configuration -webhooks: -- clientConfig: - caBundle: Cg== - service: - name: rook-nfs-webhook - namespace: rook-nfs-system - path: /validate-nfs-rook-io-v1alpha1-nfsserver - failurePolicy: Fail - name: validation.nfsserver.nfs.rook.io - rules: - - apiGroups: - - nfs.rook.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - nfsservers -``` - -By providing [cert-manager.io/inject-ca-from][cert-manager-cainjector] annotation, `cert-manager` will replace `.clientConfig.caBundle` with appropriate certificate. When constructing controller-runtime using [Builder][controller-runtime-webhook-builder] controller-runtime will serving the validation handler on `/validate-group-version-kind` and mutation handler on `/mutate-group-version-kind`. So `.clientConfig.service.path` must be have correct value. And the implementation is the admission webhook server will be deployed independently. The `Semantic Validation` will be optional and users can enable or disable this validation by deploying the admission webhook configuration and server or not. The example manifests to deploy the admission webhook server will look like this. - -```yaml ---- -kind: Service -apiVersion: v1 -metadata: - name: rook-nfs-webhook - namespace: rook-nfs-system -spec: - selector: - app: rook-nfs-webhook - ports: - - port: 443 - targetPort: webhook-server ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: rook-nfs-webhook - namespace: rook-nfs-system - labels: - app: rook-nfs-webhook -spec: - replicas: 1 - selector: - matchLabels: - app: rook-nfs-webhook - template: - metadata: - labels: - app: rook-nfs-webhook - spec: - containers: - - name: rook-nfs-webhook - image: rook/nfs:master - imagePullPolicy: IfNotPresent - args: ["nfs", "webhook"] - ports: - - containerPort: 9443 - name: webhook-server - volumeMounts: - - mountPath: /tmp/k8s-webhook-server/serving-certs - name: cert - readOnly: true - volumes: - - name: cert - secret: - defaultMode: 420 - secretName: rook-nfs-webhook-cert -``` - -Since *Semantic Validation* will be optional, validating CustomResource in the controller should still there. The improvement that will be introduced is if a given resource is invalid it should be given information in the CustomResouce status subresource. - -## References - -1. https://book.kubebuilder.io/cronjob-tutorial/controller-overview.html -1. https://pkg.go.dev/sigs.k8s.io/controller-runtime -1. https://kubernetes.io/docs/concepts/extend-kubernetes/extend-cluster/ -1. https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ -1. 
https://www.openshift.com/blog/kubernetes-operators-best-practices - -[rook-watchcr]: https://github.com/rook/rook/blob/release-1.3/pkg/operator/k8sutil/customresource.go#L48 -[cert-manager]: https://cert-manager.io/ -[cert-manager-installation]: https://cert-manager.io/docs/installation/ -[cert-manager-cainjector]: https://cert-manager.io/docs/concepts/ca-injector/ -[controller-runtime]: https://github.com/kubernetes-sigs/controller-runtime -[controller-runtime-createorupdate]: https://godoc.org/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil#CreateOrUpdate -[controller-runtime-reconcile]: https://godoc.org/sigs.k8s.io/controller-runtime/pkg/reconcile#Func.Reconcile -[controller-runtime-reconciler]: https://godoc.org/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler -[controller-runtime-defaulter]: https://godoc.org/sigs.k8s.io/controller-runtime/pkg/webhook/admission#Defaulter -[controller-runtime-validator]: https://godoc.org/sigs.k8s.io/controller-runtime/pkg/webhook/admission#Validator -[controller-runtime-webhook-builder]: https://godoc.org/sigs.k8s.io/controller-runtime/pkg/builder#WebhookBuilder \ No newline at end of file diff --git a/design/nfs/nfs-provisioner-controlled-by-operator.md b/design/nfs/nfs-provisioner-controlled-by-operator.md deleted file mode 100644 index 375d242fee0d..000000000000 --- a/design/nfs/nfs-provisioner-controlled-by-operator.md +++ /dev/null @@ -1,115 +0,0 @@ -# NFS Provisioner Controlled by Operator - -## Summary - -NFS Provisioner is a built in dynamic provisioner for Rook NFS. The functionality works fine but has an issue where the provisioner uses the same underlying directory for each provisioned PV when provisioning two or more PV in the same share/export. This overlap means that each provisioned PV for a share/export can read/write each others data. - -This hierarchy is the current behaviour of NFS Provisioner when provisioning two PV in the same share/export: - -```text -export -├── sample-export -|   ├── data (from PV-A) -|   ├── data (from PV-B) -|   ├── data (from PV-A) -|   └── data (from PV-A) -└── another-export -``` - -Both PV-A and PV-B uses the `sample-export` directory as their data location. - -This proposal is to make Rook NFS Provisioner create a sub-directory for every provisioned PV in the same share/export. So it will have a hierarchy like: - -```text -export -├── sample-export -│   ├── pv-a -│   │   ├── data (from PV-A) -│   │   ├── data (from PV-A) -│   │   └── data (from PV-A) -│   └── pv-b -│      └── data (from PV-B) -└── another-export -``` - -Since those directories are not in the NFS Provisioner pod but in the NFS Server pod, NFS Provisioner cannot directly create sub-directories for them. The solution is to mount the whole underlying NFS share/export directory so that the NFS Provisioner can create a sub-directory for each provisioned PV. - -### Original Issue - -- https://github.com/rook/rook/issues/4982 - -### Goals - -- Make NFS Provisioner to create sub-directory for each provisioned PVs. -- Make NFS Provisioner use the sub-directory for each provisioned PV instead of using underlying directory. -- Improve reliability of NFS Provisioner. - -### Non-Goals - -- NFS Operator manipulates uncontrolled resources. 
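To illustrate the intended per-PV layout, the sketch below creates one sub-directory per provisioned PV under an export that is assumed to be mounted into the provisioner pod; the mount path, permissions, and PV naming are illustrative, not the provisioner's actual code.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// ensurePVDirectory creates a dedicated sub-directory for a provisioned PV
// under the mounted NFS share, so PVs in the same export no longer share data.
func ensurePVDirectory(exportMount, pvName string) (string, error) {
	pvDir := filepath.Join(exportMount, pvName)
	if err := os.MkdirAll(pvDir, 0777); err != nil {
		return "", fmt.Errorf("creating %s: %w", pvDir, err)
	}
	return pvDir, nil
}

func main() {
	// Example: the share backed by `nfs-default-claim`, assumed mounted at /export/nfs-default-claim.
	dir, err := ensurePVDirectory("/export/nfs-default-claim", "pvc-5a2e7b1c")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("PV data directory:", dir)
}
```

The returned path would then be used as the export path of the provisioned PV, while deleting the PV simply removes its sub-directory.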
- -## Proposal details - -The approach will be similar to [Kubernetes NFS Client Provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client), where the provisioner mounts the whole of NFS share/export into the provisioner pod (by kubelet), so that the provisioner can then create the appropriate sub-directory for each provisioned PV. Currently Rook NFS Provisioner is deployed independently and before the NFS Server itself, so we cannot mount the NFS share because we don't know the NFS Server IP or the share/export directory. - -The idea is to make NFS Provisioner controlled by the operator. So when an NFS Server is created, the operator also then creates its provisioner, which mounts each NFS share/export. Then, the NFS Provisioner can create a sub-directory for each provisioned PV. - -This is the example NFS Server - -```yaml -apiVersion: nfs.rook.io/v1alpha1 -kind: NFSServer -metadata: - name: rook-nfs - namespace: rook-nfs -spec: - replicas: 1 - exports: - - name: share1 - ... - persistentVolumeClaim: - claimName: nfs-default-claim - - name: share2 - ... - persistentVolumeClaim: - claimName: nfs-another-claim -``` - -And the operator will creates the provisioner deployment like - -```yaml -kind: Deployment -apiVersion: apps/v1 -metadata: - name: rook-nfs-provisioner - namespace: rook-nfs -spec: - ... - spec: - .... - containers: - - name: rook-nfs-provisioner - image: rook/nfs:master - args: ["nfs", "provisioner","--provisioner=nfs.rook.io/nfs-server-provisioner"] - volumes: - - name: share1 - nfs: - server: - path: /export/nfs-default-claim - - name: share2 - nfs: - server: - path: /export/nfs-another-claim -``` - -The provisioner deployment will be created in the same namespace as the NFS server and with the same privileges. Since the provisioner is automatically created by the operator, the provisioner deployment name and provisioner name flag (`--provisioner`) value will depend on NFSServer name. The provisioner deployment name will have an added suffix of `-provisioner` and the provisioner name will start with `nfs.rook.io/`. - -## Alternatives - -The other possible approach is NFS Provisioner mounts the NFS Server share manually (by executing `mount` command) before creating an appropriate directory for each PV. But in my humble opinion, NFS Provisioner would be lacking reliability under several conditions like NFSServer getting its exports updated, the cluster has two or more NFSServer, etc. - -## Glossary - -**Provisioned PV:** Persistent Volumes which provisioned by rook nfs provisioner through Storage Class and Persistent Volumes Claims. - -**NFS share/export:** A directory in NFS Server which exported using nfs protocol. diff --git a/design/nfs/nfs-quota.md b/design/nfs/nfs-quota.md deleted file mode 100644 index 357f54a27564..000000000000 --- a/design/nfs/nfs-quota.md +++ /dev/null @@ -1,122 +0,0 @@ -# NFS Quota - -## Background - -Currently, when the user creates NFS PersistentVolumes from an NFS Rook share/export via PersistentVolumeClaim, the provisioner does not provide the specific capacity as requested. For example the users create NFS PersistentVolumes via PersistentVolumeClaim as following: - -```yaml -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: rook-nfs-pv-claim -spec: - storageClassName: "rook-nfs-share" - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Mi -``` - -The client still can use the higher capacity than `1mi` as requested. 
- -This proposal is to add features which the Rook NFS Provisioner will provide the specific capacity as requested from `.spec.resources.requests.storage` field in PersistentVolumeClaim. - -## Implementation - -The implementation will be use `Project Quota` on xfs filesystem. When the users need to use the quota feature they should use xfs filesystem with `prjquota/pquota` mount options for underlying volume. Users can specify filesystem type and mount options through StorageClass that will be used for underlying volume. For example: - -```yaml -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: standard-xfs -parameters: - fsType: xfs -mountOptions: - - prjquota -... -``` - -> Note: Many distributed storage providers for Kubernetes support xfs filesystem. Typically by defining `fsType: xfs` or `fs: xfs` (depend on storage providers) in storageClass parameters. for more detail about specify filesystem type please see https://kubernetes.io/docs/concepts/storage/storage-classes/ - -Then the underlying PersistentVolumeClaim should be using that StorageClass - -```yaml -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: nfs-default-claim -spec: - storageClassName: "standard-xfs" - accessModes: - - ReadWriteOnce -... -``` - -If the above conditions are met then the Rook NFS Provisioner will create projects and set the quota limit using [xfs_quota](https://linux.die.net/man/8/xfs_quota) before creating PersistentVolumes based on `.spec.resources.requests.storage` field in PersistentVolumeClaim. Otherwise the Rook NFS Provisioner will provision a PersistentVolumes without creating setting the quota. - -To creating the project, Rook NFS Provisioner will invoke the following command - -> xfs_quota -x -c project -s -p '*nfs_pv_directory* *project_id*' *projects_file* - -And setting quota with the command - -> xfs_quota -x -c 'limit -p bhard=*size* *project_id*' *projects_file* - -which - -1. *nfs_pv_directory* is sub-directory from exported directory that used for NFS PV. -1. *project_id* is unique id `uint16` 1 to 65535. -1. *size* is size of quota as requested. -1. *projects_file* is file that contains *project quota block* for persisting quota state purpose. In case the Rook NFS Provisioner pod is killed, Rook NFS Provisioner pod will restore the quota state based on *project quota block* entries in *projects_file* at startup. -1. *project quota block* is combine of *project_id*:*nfs_pv_directory*:*size* - -Since Rook NFS has the ability to create more than one NFS share/export that have different underlying volume directories, the *projects_file* will be saved on each underlying volume directory. So each NFS share/export will have different *projects_file* and each *project_file* will be persisted. The *projects_file* will only be created if underlying volume directory is mounted as `xfs` with `prjquota` mount options. This mean the existence of *project_file* will indicate if quota was enabled. 
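Putting the two `xfs_quota` invocations above together, a minimal sketch of how the provisioner could shell out to them is shown below; the projects file path, project id, directory, and size are placeholder values, not the final implementation.

```go
package main

import (
	"fmt"
	"os/exec"
)

// setProjectQuota registers an XFS project for a PV sub-directory and applies a
// hard block limit, mirroring the two commands described above. All arguments
// are illustrative placeholders.
func setProjectQuota(projectsFile string, projectID uint16, pvDir, size string) error {
	// xfs_quota -x -c 'project -s -p <nfs_pv_directory> <project_id>' <projects_file>
	setup := fmt.Sprintf("project -s -p %s %d", pvDir, projectID)
	if out, err := exec.Command("xfs_quota", "-x", "-c", setup, projectsFile).CombinedOutput(); err != nil {
		return fmt.Errorf("creating project %d: %v: %s", projectID, err, out)
	}

	// xfs_quota -x -c 'limit -p bhard=<size> <project_id>' <projects_file>
	limit := fmt.Sprintf("limit -p bhard=%s %d", size, projectID)
	if out, err := exec.Command("xfs_quota", "-x", "-c", limit, projectsFile).CombinedOutput(); err != nil {
		return fmt.Errorf("setting quota for project %d: %v: %s", projectID, err, out)
	}
	return nil
}

func main() {
	if err := setProjectQuota("/export/projects_file", 1, "/export/nfs-pv-a", "1m"); err != nil {
		fmt.Println(err)
	}
}
```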
The hierarchy of directory will look like: - -```text -/ -├── underlying-volume-A (export A) (mounted as xfs with prjquota mount options) -│ ├── projects_file -│ ├── nfs-pv-a (PV-A) (which quota created for) -│ │ ├── data (from PV-A) -│ └── nfs-pv-b (PV-B) (which quota created for) -│ └── data (from PV-B) -├── underlying-volume-B (export B) (mounted as xfs with prjquota mount options) -│ ├── projects_file -│ └── nfs-pv-c (PV-C) (which quota created for) -└── underlying-volume-C (export C) (not mounted as xfs) - └── nfs-pv-d (PV-D) (quota not created) -``` - -The hierarchy above is example Rook NFS has 3 nfs share/exports (A, B and C). *project_file* inside underlying-volume-A will contains *project quota block* like - -``` -1:/underlying-volume-A/nfs-pv-a:size -2:/underlying-volume-A/nfs-pv-b:size -``` - -*project_file* inside underlying-volume-B will look like - -``` -1:/underlying-volume-B/nfs-pv-c:size -``` - -underlying-volume-C not have *project_file* because it is not mounted as xfs filesystem. - -### Updating container image - -Since `xfs_quota` binary is not installed by default we need to update Rook NFS container image by installing `xfsprogs` package. - -### Why XFS - -Most of Kubernetes VolumeSource use ext4 filesystem type if `fsType` is unspecified by default. Ext4 also have project quota feature starting in [Linux kernel 4.4](https://lwn.net/Articles/671627/). But not like xfs which natively support project quota, to mount ext4 with prjquota option we need additional step such as enable the project quota through [tune2fs](https://linux.die.net/man/8/tune2fs) before it mounted and some linux distro need additional kernel module for quota management. So for now we will only support xfs filesystem when users need quota feature in Rook NFS and might we can expand to ext4 filesystem also if possible. - -## References - -1. https://kubernetes.io/docs/concepts/storage/volumes/ -1. https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/ -1. https://linux.die.net/man/8/xfs_quota -1. https://lwn.net/Articles/671627/ -1. https://linux.die.net/man/8/tune2fs -1. https://www.digitalocean.com/community/tutorials/how-to-set-filesystem-quotas-on-ubuntu-18-04#step-2-%E2%80%93-installing-the-quota-kernel-module diff --git a/design/nfs/nfs.md b/design/nfs/nfs.md deleted file mode 100644 index 3e035383dba6..000000000000 --- a/design/nfs/nfs.md +++ /dev/null @@ -1,290 +0,0 @@ -# Add NFS to Rook - -## Overview - -This document explores a design to add NFS to Rook. This is a part of the rook feature request [#1551](https://github.com/rook/rook/issues/1551). - -## Rook Architecture - -Rook turns distributed storage software into a self-managing, self-scaling, and self-healing storage services. It does this by automating deployment, bootstrapping, configuration, provisioning, scaling, upgrading, migration, disaster recovery, monitoring, and resource management. Rook uses the facilities provided by the underlying cloud-native container management, scheduling and orchestration platform to perform its duties. -![Rook Architecture on Kubernetes](../../Documentation/media/rook-architecture.png) - -## Network File System (NFS) - -NFS allows remote hosts to mount file systems over a network and interact with those file systems as though they are mounted locally. This enables system administrators to consolidate resources onto centralized servers on the network. - -## Why NFS? - -NFS is widely used for persistent storage in kubernetes cluster. 
Using NFS storage is a convenient and easy way to provision storage for applications. -An NFS volume allows an existing NFS (Network File System) share to be mounted into the pod. -The contents of an NFS volume are preserved and the volume is merely unmounted if the pod is stopped/destroyed. This means that an NFS volume can be pre-populated with data, and that data can be “handed off” between pods. -NFS supports multiple read/write simultaneously so a single share can be attached to multiple pods. - -## Design -With this design Rook is exploring to providing another widely adopted storage option for admins and users of cloud-native environments. This design tends to automate NFS starting from its configuration (such as allowed hosts, read/write permissions etc.) to deployment and provisioning. The operations on NFS which cannot be done natively by Kubernetes will be automated. -NFS doesn’t provide an internal provisioner for kubernetes, so Rook is needed as an external provisioner. -This design uses NFS-Ganesha server and NFS v4. - -### Initial Setup - -The flow of creating NFS backed storage in Rook is -1. The settings are determined and saved in an NFS server CRD (rook-nfs.yaml) -2. `kubectl create -f rook-nfs.yaml` -3. When the NFS CRD instance is created, Rook responds to this request by starting the NFS daemon with the required configuration and exports stated in the CRD and creates a service to expose NFS. -4. NFS volume is ready to be consumed by other pods through a PVC. - -### NFS CRD - -The NFS CRD spec will specify the following: -1. NFS server storage backend configuration. E.g., configuration for various storage backends(ceph, ebs, azure disk etc) that will be shared using NFS. -2. NFS server configuration - The following points are required for configuring NFS server: - - export (The volume being exported) - - client (The host or network to which the export is being shared) - - client options (The options to be used for the client) e.g., read and write permission, root squash etc. - -The parameters to configure NFS CRD are demonstrated in the example below which is followed by a table that explains the parameters: - -A simple example for sharing a volume(could be hostPath, cephFS, cephRBD, googlePD, EBS etc.) using NFS, without client specification and per export based configuration, whose NFS-Ganesha export entry looks like: -``` -EXPORT { - Export_Id = 1; - Path = /export; - Pseudo = /nfs-share; - Protocols = 4; - Sectype = sys; - Access_Type = RW; - Squash = none; - FSAL { - Name = VFS; - } -} -``` -the CRD instance will look like the following: -```yaml -apiVersion: rook.io/v1alpha1 -kind: NFSServer -metadata: - name: nfs-vol - namespace: rook -spec: - replicas: 1 - exports: - - name: nfs-share - server: - accessMode: ReadWrite - squash: root - persistentVolumeClaim: - claimName: googlePD-claim -``` -The table explains each parameter - -| Parameter | Description | Default | -| ------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------- | ---------- | -| `replicas` | The no. 
of NFS daemon to start | `1` | -| `exports` | Parameters for creating an export | | -| `exports.name` | Name of the volume being shared | | -| `exports.server` | NFS server configuration | | -| `exports.server.accessMode` | Volume access modes(Reading and Writing) for the share | `ReadOnly` | -| `exports.server.squash` | This prevents root users connected remotely from having root privileges | `root` | -| `exports.server.allowedClients` | Access configuration for clients that can consume the NFS volume | | -| `exports.server.allowedClients.name` | Name of the host/hosts | | -| `exports.server.allowedClients.clients` | The host or network to which export is being shared.(could be hostname, ip address, netgroup, CIDR network address, or all) | | -| `exports.server.allowedClients.accessMode` | Reading and Writing permissions for the client* | `ReadOnly` | -| `exports.server.allowedClients.squash` | Squash option for the client* | `root` | -| `exports.persistentVolumeClaim` | Claim to get volume(Volume could come from hostPath, cephFS, cephRBD, googlePD, EBS etc. and these volumes will be exposed by NFS server ). | | -| `exports.persistentVolumeClaim.claimName` | Name of the PVC | | - -*note: if `exports.server.accessMode` and `exports.server.squash` options are mentioned, `exports.server.allowedClients.accessMode` and `exports.server.allowedClients.squash` are overridden respectively. - -Available options for `volumes.allowedClients.accessMode` are: -1. ReadOnly -2. ReadWrite -3. none - -Available options for `volumes.allowedClients.squash` are: -1. none (No user id squashing is performed) -2. rootId (uid 0 and gid 0 are squashed to the anonymous uid and anonymous gid) -3. root (uid 0 and gid of any value are squashed to the anonymous uid and anonymous gid) -4. all (All users are squashed) - -The volume that needs to be exported by NFS must be attached to NFS server pod via PVC. Examples of volume that can be attached are Host Path, AWS Elastic Block Store, GCE Persistent Disk, CephFS, RBD etc. The limitations of these volumes also apply while they are shared by NFS. The limitation and other details about these volumes can be found [here](https://kubernetes.io/docs/concepts/storage/persistent-volumes/). - -### Examples - -Here are some examples for advanced configuration: - -1. For sharing a volume(could be hostPath, cephFS, cephRBD, googlePD, EBS etc.) using NFS, which will be shared as /nfs-share by the NFS server with different options for different clients whose NFS-Ganesha export entry looks like: -``` -EXPORT { - Export_Id = 1; - Path = /export; - Pseudo = /nfs-share; - Protocols = 4; - Sectype = sys; - FSAL { - Name = VFS; - } - CLIENT { - Clients = 172.17.0.5; - Access_Type = RO; - Squash = root; - } - CLIENT { - Clients = 172.17.0.0/16, serverX; - Access_Type = RW; - Squash = none; - } -} -``` -the CRD instance will look like the following: -```yaml -apiVersion: rook.io/v1alpha1 -kind: NFSServer -metadata: - name: nfs-vol - namespace: rook -spec: - replicas: 1 - exports: - - name: nfs-share - server: - allowedClients: - - name: host1 - clients: 172.17.0.5 - accessMode: ReadOnly - squash: root - - name: host2 - clients: - - 172.17.0.0/16 - - serverX - accessMode: ReadWrite - squash: none - persistentVolumeClaim: - claimName: ebs-claim -``` - -2. 
For sharing multiple volumes using NFS, which will be shared as /share1 and /share2 by the NFS server whose NFS-Ganesha export entry looks like: -``` -EXPORT { - Export_Id = 1; - Path = /export; - Pseudo = /share1; - Protocols = 4; - Sectype = sys; - FSAL { - Name = VFS; - } - CLIENT { - Clients = all; - Access_Type = RO; - Squash = none; - } -} -EXPORT { - Export_Id = 2; - Path = /export2; - Pseudo = /share2; - Protocols = 4; - Sectype = sys; - FSAL { - Name = VFS; - } - CLIENT { - Clients = all; - Access_Type = RW; - Squash = none; - } -} -``` -the CRD instance will look like the following: -```yaml -apiVersion: rook.io/v1alpha1 -kind: NFSServer -metadata: - name: nfs-multi-vol - namespace: rook -spec: - replicas: 1 - exports: - - name: share1 - server: - allowedClients: - - name: ebs-host - clients: all - accessMode: ReadOnly - squash: none - persistentVolumeClaim: - claimName: ebs-claim - - name: share2 - server: - allowedClients: - - name: ceph-host - clients: all - accessMode: ReadWrite - squash: none - persistentVolumeClaim: - claimName: cephfs-claim -``` - -## Adding and Removing exports from an existing NFS server -Exports can be added and removed by updating the CRD using kubectl edit/replace -f rook-nfs.yaml - -## Client Access -The administrator creates a storage class. -Here is an example of NFS storage class for Example 1: -```yaml -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: rook-nfs -provisioner: nfs.rook.io/nfs -parameters: - server: nfs-vol - export: nfs-share -``` - -The user can use the NFS volume by creating a PVC. -Here is an example of NFS PVC -```yaml -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: httpd-pv-claim - labels: - app: web -spec: - storageClassName: rook-nfs - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: web-server - labels: - app: web -spec: - template: - metadata: - labels: - app: web - tier: httpd - spec: - containers: - - image: httpd - name: httpd - ports: - - containerPort: 80 - name: httpd - volumeMounts: - - name: httpd-persistent-storage - mountPath: /var/www/html - volumes: - - name: httpd-persistent-storage - persistentVolumeClaim: - claimName: httpd-pv-claim ---- diff --git a/go.mod b/go.mod index c8ef3e8bc9f3..8f38c1bb1756 100644 --- a/go.mod +++ b/go.mod @@ -3,19 +3,20 @@ module github.com/rook/rook go 1.16 require ( - github.com/aws/aws-sdk-go v1.35.24 + github.com/aws/aws-sdk-go v1.37.19 github.com/banzaicloud/k8s-objectmatcher v1.1.0 - github.com/ceph/go-ceph v0.10.1-0.20210722102457-1a18c0719372 + github.com/ceph/go-ceph v0.11.0 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f - github.com/csi-addons/volume-replication-operator v0.1.1-0.20210525040814-ab575a2879fb - github.com/davecgh/go-spew v1.1.1 github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 github.com/go-ini/ini v1.51.1 github.com/google/go-cmp v0.5.5 github.com/google/uuid v1.1.2 - github.com/hashicorp/vault/api v1.0.5-0.20200902155336-f9d5ce5a171a + github.com/hashicorp/vault v1.8.2 + github.com/hashicorp/vault-plugin-secrets-kv v0.9.0 + github.com/hashicorp/vault/api v1.1.2-0.20210713235431-1fc8af4c041f + github.com/hashicorp/vault/sdk v0.2.2-0.20210825150427-9b1f4d486f5d github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.1.0 - github.com/kube-object-storage/lib-bucket-provisioner v0.0.0-20210311161930-4bea5edaff58 + github.com/kube-object-storage/lib-bucket-provisioner v0.0.0-20210818162813-3eee31c01875 
github.com/libopenstorage/secrets v0.0.0-20210709082113-dde442ea20ec github.com/openshift/cluster-api v0.0.0-20191129101638-b09907ac6668 github.com/openshift/machine-api-operator v0.2.1-0.20190903202259-474e14e4965a @@ -26,21 +27,17 @@ require ( github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.7.0 github.com/tevino/abool v1.2.0 - github.com/yanniszark/go-nodetool v0.0.0-20191206125106-cd8f91fa16be - golang.org/x/sync v0.0.0-20201207232520-09787c993a3a + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c gopkg.in/ini.v1 v1.57.0 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.21.2 - k8s.io/apiextensions-apiserver v0.21.1 - k8s.io/apimachinery v0.21.2 - k8s.io/apiserver v0.21.1 - k8s.io/client-go v0.21.2 + k8s.io/api v0.21.3 + k8s.io/apiextensions-apiserver v0.21.3 + k8s.io/apimachinery v0.21.3 + k8s.io/client-go v0.21.3 k8s.io/cloud-provider v0.21.1 - k8s.io/component-helpers v0.21.1 k8s.io/kube-controller-manager v0.21.1 - k8s.io/utils v0.0.0-20210527160623-6fdb442a123b - sigs.k8s.io/controller-runtime v0.9.0 - sigs.k8s.io/kustomize/kyaml v0.10.17 + k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471 + sigs.k8s.io/controller-runtime v0.9.6 sigs.k8s.io/sig-storage-lib-external-provisioner/v6 v6.1.0 ) diff --git a/go.sum b/go.sum index a5d744715ac0..ab4944462b46 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,4 @@ +bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= @@ -21,6 +22,7 @@ cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbf cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.6.0/go.mod h1:hyFDG0qSGdHNz8Q6nDN8rYIkld0q/+5uBZaelxiDLfE= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= @@ -30,34 +32,60 @@ cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2k cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/spanner v1.5.1/go.mod h1:e1+8M6PF3ntV9Xr57X2Gf+UhylXXYF6gI4WRZ1kfu2A= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f h1:UrKzEwTgeiff9vxdrfdqxibzpWjxLnuXDI5m6z3GJAk= code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f/go.mod 
h1:sk5LnIjB/nIEU7yP5sDQExVm62wu0pBh3yrElngUisI= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= +github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v36.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v44.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v51.1.0+incompatible h1:7uk6GWtUqKg6weLv2dbKnzwb0ml1Qn70AdtRccZ543w= +github.com/Azure/azure-sdk-for-go v51.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-storage-blob-go v0.13.0 h1:lgWHvFh+UYBNVQLFHXkvul2f6yOPA9PIH82RTG2cSwc= +github.com/Azure/azure-storage-blob-go v0.13.0/go.mod h1:pA9kNqtjUeQF2zOSu4s//nUdBD+e64lEuc4sVnuOfNs= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= +github.com/Azure/go-autorest/autorest v0.10.1/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= +github.com/Azure/go-autorest/autorest v0.11.0/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= -github.com/Azure/go-autorest/autorest v0.11.12 h1:gI8ytXbxMfI+IVbI9mP2JGCTXIuhHLgRlvQ9X4PsnHE= github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= +github.com/Azure/go-autorest/autorest v0.11.17 h1:2zCdHwNgRH+St1J+ZMf66xI8aLr/5KMy+wWLH97zwYM= +github.com/Azure/go-autorest/autorest v0.11.17/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.7.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= -github.com/Azure/go-autorest/autorest/adal v0.9.5 
h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0= +github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/adal v0.9.11 h1:L4/pmq7poLdsy41Bj1FayKvBhayuWRYkx9HU5i4Ybl0= +github.com/Azure/go-autorest/autorest/adal v0.9.11/go.mod h1:nBKAnTomx8gDtl+3ZCJv2v0KACFHWTB2drffI1B68Pk= github.com/Azure/go-autorest/autorest/azure/auth v0.4.0/go.mod h1:Oo5cRhLvZteXzI2itUm5ziqsoIxRkzrt3t61FeZaS18= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.0/go.mod h1:QRTvSZQpxqm8mSErhnbI+tANIBAKP7B+UIE2z4ypUO0= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.7 h1:8DQB8yl7aLQuP+nuR5e2RO6454OvFlSTXXaNHshc16s= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.7/go.mod h1:AkzUsqkrdmNhfP2i54HqINVQopw0CLDnvHpJ88Zz1eI= github.com/Azure/go-autorest/autorest/azure/cli v0.3.0/go.mod h1:rNYMNAefZMRowqCV0cVhr/YDW5dD7afFq9nXAXL4ykE= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.0/go.mod h1:JljT387FplPzBA31vUcvsetLKF3pec5bdAxjVU4kI2s= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 h1:dMOmEJfkLKW/7JsokJqkyoYSgmR08hi9KrhjZb+JALY= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= @@ -69,43 +97,71 @@ github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935 github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= +github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= +github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= +github.com/Azure/go-autorest/autorest/validation v0.3.0/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= +github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= +github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/Azure/go-ntlmssp 
v0.0.0-20200615164410-66371956d46c h1:/IBSNwUN8+eKzUzbJPqhK839ygXJ82sde8x3ogr6R28= +github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= github.com/BurntSushi/toml v0.3.0/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.4.4/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/IBM/keyprotect-go-client v0.5.1/go.mod h1:5TwDM/4FRJq1ZOlwQL1xFahLWQ3TveR88VmL1u3njyI= github.com/Jeffail/gabs v1.1.1 h1:V0uzR08Hj22EX8+8QMhyI9sX2hwRu+/RJhJUmnwda/E= github.com/Jeffail/gabs v1.1.1/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg= +github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= +github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Microsoft/go-winio v0.4.13/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331 h1:3YnB7Hpmh1lPecPE8doMOtYCrMdrpedZOvxfuNES/Vk= +github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/Microsoft/hcsshim v0.8.14 h1:lbPVK25c1cu5xTLITwpUcxoA9vKrKErASPYygvouJns= +github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod 
h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/SAP/go-hdb v0.14.1 h1:hkw4ozGZ/i4eak7ZuGkY5e0hxiXFdNUBNhr4AvZVNFE= github.com/SAP/go-hdb v0.14.1/go.mod h1:7fdQLVC2lER3urZLjZCm0AuMQfApof92n3aylBPEkMo= +github.com/Sectorbob/mlab-ns2 v0.0.0-20171030222938-d3aa0c295a8a h1:KFHLI4QGttB0i7M3qOkAo8Zn/GSsxwwCnInFqBaYtkM= github.com/Sectorbob/mlab-ns2 v0.0.0-20171030222938-d3aa0c295a8a/go.mod h1:D73UAuEPckrDorYZdtlCu2ySOLuPB5W4rhIkmmc/XbI= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= +github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af h1:DBNMBMuMiWYu0b+8KMJuWmfCkcxl09JwdlqwDZZ6U14= github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw= +github.com/aerospike/aerospike-client-go v3.1.1+incompatible/go.mod h1:zj8LBEnWBDOVEIJt8LvaRvDG5ARAoa5dBeHaB472NRc= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/airbrake/gobrake v3.6.1+incompatible/go.mod h1:wM4gu3Cn0W0K7GUuVWnlXZU11AGBXMILnrdOU8Kn00o= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -114,11 +170,15 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190412020505-60e2075261b6/go.mod h1:T9M45xf79ahXVelWoOBmH0y4aC1t5kXO5BxwyakgIGA= +github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f h1:oRD16bhpKNAanfcDDVU+J0NXqsgHIvGbbe/sy+r6Rs0= github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ= github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/antihax/optional v1.0.0/go.mod 
h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230 h1:5ultmol0yeX75oh1hY78uAFn3dupBQ/QUNxERCkiaUQ= +github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2/go.mod h1:OMVSB21p9+xQUIqlGizHPZfjK+SHws1ht+ZytVDoz9U= github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30/go.mod h1:4AJxUpXUhv4N+ziTvIcWWXgeorXpxPZOfk9HdEVr96M= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= @@ -126,22 +186,55 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= -github.com/armon/go-metrics v0.3.1 h1:oNd9vmHdQuYICjy5hE2Ysz2rsIOBl4z7xA6IErlfd48= github.com/armon/go-metrics v0.3.1/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= -github.com/armon/go-proxyproto v0.0.0-20190211145416-68259f75880e h1:h0gP0hBU6DsA5IQduhLWGOEfIUKzJS5hhXQBSgHuF/g= +github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-metrics v0.3.4/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-metrics v0.3.7 h1:c/oCtWzYpboy6+6f6LjXRlyW7NwA2SWf+a9KMlHq/bM= +github.com/armon/go-metrics v0.3.7/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-proxyproto v0.0.0-20190211145416-68259f75880e/go.mod h1:QmP9hvJ91BbJmGVGSbutW19IC0Q9phDCLGaomwTJbgU= +github.com/armon/go-proxyproto v0.0.0-20210323213023-7e956b284f0a h1:AP/vsCIvJZ129pdm9Ek7bH7yutN3hByqsMoNrWAxRQc= +github.com/armon/go-proxyproto v0.0.0-20210323213023-7e956b284f0a/go.mod h1:QmP9hvJ91BbJmGVGSbutW19IC0Q9phDCLGaomwTJbgU= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.35.24 h1:U3GNTg8+7xSM6OAJ8zksiSM4bRqxBWmVwwehvOSNG3A= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.30.27/go.mod 
h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k= +github.com/aws/aws-sdk-go v1.37.19 h1:/xKHoSsYfH9qe16pJAHIjqTVpMM2DRSsEt8Ok1bzYiw= +github.com/aws/aws-sdk-go v1.37.19/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/aws/aws-sdk-go-v2 v1.3.2 h1:RQj8l98yKUm0UV2Wd3w/Ms+TXV9Rs1E6Kr5tRRMfyU4= +github.com/aws/aws-sdk-go-v2 v1.3.2/go.mod h1:7OaACgj2SX3XGWnrIjGlJM22h6yD6MEWKvm7levnnM8= +github.com/aws/aws-sdk-go-v2/config v1.1.5/go.mod h1:P3F1hku7qzC81txjwXnwOM6Ex6ezkU6+/557Teyb64E= +github.com/aws/aws-sdk-go-v2/credentials v1.1.5 h1:R9v/eN5cXv5yMLC619xRYl5PgCSuy5SarizmM7+qqSA= +github.com/aws/aws-sdk-go-v2/credentials v1.1.5/go.mod h1:Ir1R6tPiR1/2y1hes8yOijFMz54hzSmgcmCDo6F45Qc= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.6/go.mod h1:0+fWMitrmIpENiY8/1DyhdYPUCAPvd9UNz9mtCsEoLQ= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.1.2 h1:Doa5wabOIDA0XZzBX5yCTAPGwDCVZ8Ux0wh29AUDmN4= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.1.2/go.mod h1:Azf567f5wBUfUbwpyJJnLM/geFFIzEulGR30L+nQZOE= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.0.4 h1:8yeByqOL6UWBsOOXsHnW93/ukwL66O008tRfxXxnTwA= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.0.4/go.mod h1:BCfU3Uo2fhKcMZFp9zU5QQGQxqWCOYmZ/27Dju3S/do= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.6 h1:ldYIsOP4WyjdzW8t6RC/aSieajrlx+3UN3UCZy1KM5Y= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.6/go.mod h1:L0KWr0ASo83PRZu9NaZaDsw3koS6PspKv137DMDZjHo= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.2.2 h1:aU8H58DoYxNo8R1TaSPTofkuxfQNnoqZmWL+G3+k/vA= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.2.2/go.mod h1:nnutjMLuna0s3GVY/MAkpLX03thyNER06gXvnMAPj5g= +github.com/aws/aws-sdk-go-v2/service/s3 v1.5.0 h1:VbwXUI3L0hyhVmrFxbDxrs6cBX8TNFX0YxCpooMNjvY= +github.com/aws/aws-sdk-go-v2/service/s3 v1.5.0/go.mod h1:uwA7gs93Qcss43astPUb1eq4RyceNmYWAQjZFDOAMLo= +github.com/aws/aws-sdk-go-v2/service/sso v1.1.5/go.mod h1:bpGz0tidC4y39sZkQSkpO/J0tzWCMXHbw6FZ0j1GkWM= +github.com/aws/aws-sdk-go-v2/service/sts v1.2.2/go.mod h1:ssRzzJ2RZOVuKj2Vx1YE7ypfil/BIlgmQnCSW4DistU= +github.com/aws/smithy-go v1.3.1 h1:xJFO4pK0y9J8fCl34uGsSJX5KNnGbdARDlA5BPhXnwE= +github.com/aws/smithy-go v1.3.1/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/banzaicloud/k8s-objectmatcher v1.1.0 h1:KHWn9Oxh21xsaGKBHWElkaRrr4ypCDyrh15OB1zHtAw= github.com/banzaicloud/k8s-objectmatcher v1.1.0/go.mod h1:gGaElvgkqa0Lk1khRr+jel/nsCLfzhLnD3CEWozpk9k= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -149,42 +242,70 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= github.com/bgentry/speakeasy v0.1.0/go.mod 
h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= +github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI= github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/briankassouf/jose v0.9.2-0.20180619214549-d2569464773f h1:ZMEzE7R0WNqgbHplzSBaYJhJi5AZWTCK9baU0ebzG6g= github.com/briankassouf/jose v0.9.2-0.20180619214549-d2569464773f/go.mod h1:HQhVmdUf7dBNwIIdBTivnCDxcf6IZY3/zrb+uKSJz6Y= github.com/bugsnag/bugsnag-go v1.4.0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= +github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/centrify/cloud-golang-sdk v0.0.0-20190214225812-119110094d0f h1:gJzxrodnNd/CtPXjO3WYiakyNzHg3rtAi7rO74ejHYU= github.com/centrify/cloud-golang-sdk v0.0.0-20190214225812-119110094d0f/go.mod h1:C0rtzmGXgN78pYR0tGJFhtHgkbAs0lIbHwkB81VxDQE= -github.com/ceph/go-ceph v0.10.1-0.20210722102457-1a18c0719372 h1:DZN/4RR6Yok0VJ3xaP8xxv8Le8bxJfX6XXE6Kxkvj2Y= -github.com/ceph/go-ceph v0.10.1-0.20210722102457-1a18c0719372/go.mod h1:mafFpf5Vg8Ai8Bd+FAMvKBHLmtdpTXdRP/TNq8XWegY= +github.com/ceph/go-ceph v0.11.0 h1:A1pphV40LL8GQKDPpU4XqCa7gkmozsst7rhCC730/nk= +github.com/ceph/go-ceph v0.11.0/go.mod h1:mafFpf5Vg8Ai8Bd+FAMvKBHLmtdpTXdRP/TNq8XWegY= github.com/certifi/gocertifi v0.0.0-20190105021004-abcd57078448/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0 h1:CWU8piLyqoi9qXEUwzOh5KFKGgmSU5ZhktJyYcq6ryQ= 
github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0/go.mod h1:5d8DqS60xkj9k3aXfL3+mXBH0DPYO0FQjcKosxl+b/Q= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudfoundry-community/go-cfclient v0.0.0-20190201205600-f136f9222381 h1:rdRS5BT13Iae9ssvcslol66gfOOXjaLYwqerEn/cl9s= github.com/cloudfoundry-community/go-cfclient v0.0.0-20190201205600-f136f9222381/go.mod h1:e5+USP2j8Le2M0Jo3qKPFnNhuo1wueU4nWHCXBOfQ14= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= -github.com/container-storage-interface/spec v1.2.0 h1:bD9KIVgaVKKkQ/UbVUY9kCaH/CJbhNxe0eeB4JeJV2s= -github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= +github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= +github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= +github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.3 h1:ijQT13JedHSHrQGWFcGEwzcNKrAGIiZ+jSD5QQG07SY= +github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20200709052629-daa8e1ccc0bc/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= +github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe 
h1:PEmIrUvwG9Yyv+0WKZqjXfSFDeZjs/q15g0m08BYS9k= +github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= @@ -195,41 +316,62 @@ github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8Nz github.com/coreos/go-oidc v0.0.0-20180117170138-065b426bd416/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-oidc v2.0.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-oidc v2.2.1+incompatible h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjsz9N1i1YxjHAk= +github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-oidc/v3 v3.0.0 h1:/mAA0XMgYJw2Uqm7WKGCsKnjitE/+A0FFbOmiRJm7LQ= +github.com/coreos/go-oidc/v3 v3.0.0/go.mod h1:rEJ/idjfUyfkBit1eI1fvyr+64/g9dcKpAm8MJMesvo= github.com/coreos/go-semver v0.0.0-20180108230905-e214231b295a/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= +github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/couchbase/gocb/v2 v2.1.4 h1:HRuVhqZpVNIck3FwzTxWh5TnmGXeTmSfjhxkjeradLg= +github.com/couchbase/gocb/v2 v2.1.4/go.mod h1:lESKM6wCEajrFVSZUewYuRzNtuNtnRey5wOfcZZsH90= 
+github.com/couchbase/gocbcore/v9 v9.0.4 h1:VM7IiKoK25mq9CdFLLchJMzmHa5Grkn+94pQNaG3oc8= +github.com/couchbase/gocbcore/v9 v9.0.4/go.mod h1:jOSQeBSECyNvD7aS4lfuaw+pD5t6ciTOf8hrDP/4Nus= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/csi-addons/spec v0.1.0 h1:y3TOd7qtnwBQPikGa1VvaL7ObyddAZehYW8DNGBlOyc= -github.com/csi-addons/spec v0.1.0/go.mod h1:Mwq4iLiUV4s+K1bszcWU6aMsR5KPsbIYzzszJ6+56vI= -github.com/csi-addons/volume-replication-operator v0.1.1-0.20210525040814-ab575a2879fb h1:SAD+o8nvVErQkOIa31u1BblVHAXXEPQl7mRc+U5GBp8= -github.com/csi-addons/volume-replication-operator v0.1.1-0.20210525040814-ab575a2879fb/go.mod h1:cQvrR2fRQ7Z9jbbt3+PGZzFmByNfAH3KW8OuH3bkMbY= github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/denisenkom/go-mssqldb v0.0.0-20190412130859-3b1d194e553a/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= +github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc h1:VRRKCwnzqk8QCaRC4os14xoKDdbHqqlJtJA0oc1ZAjg= +github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661 h1:lrWnAyy/F72MbxIxFUzKmcMCdt9Oi8RzpAxzTNQHD7o= +github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/digitalocean/godo v1.7.5 h1:JOQbAO6QT1GGjor0doT0mXefX2FgUDPOpYh2RaXA+ko= +github.com/digitalocean/godo v1.7.5/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nbpNqkUIozU= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= +github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= +github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= 
+github.com/docker/docker v1.4.2-0.20200319182547-c7ad2b866182/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v17.12.0-ce-rc1.0.20200309214505-aa6a9891b09c+incompatible h1:G2hY8RD7jB9QaSmcb8mYEIg8QbEvVAB7se8+lXHZHfg= +github.com/docker/docker v17.12.0-ce-rc1.0.20200309214505-aa6a9891b09c+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= @@ -239,13 +381,14 @@ github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdf github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74 h1:2MIhn2R6oXQbgW5yHfS+d6YqyMfXiu2L55rFZC4UD/M= github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74/go.mod h1:UqXY1lYT/ERa4OEAywUqdok1T4RCRdArkhic1Opuavo= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/elazarl/go-bindata-assetfs v1.0.0 h1:G/bYguwHIzWq9ZoyUQqrjTmJbbYn3j3CKKpKinvZLFk= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= +github.com/elazarl/go-bindata-assetfs v1.0.1-0.20200509193318-234c15e7648f h1:AwZUiMWfYSmIiHdFJIubTSs8BFIFoMmUFbeuwBzHIPs= +github.com/elazarl/go-bindata-assetfs v1.0.1-0.20200509193318-234c15e7648f/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484 h1:pEtiCjIXx3RvGjlUJuCNxNOw0MNblyR9Wi+vJGBFh+8= @@ -253,7 +396,6 @@ github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484/go.mod h1:Ro8st/El github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.10.0+incompatible h1:l6Soi8WCOOVAeCo4W98iBFC6Og7/X8bpRt51oNLZ2C8= github.com/emicklei/go-restful v2.10.0+incompatible/go.mod 
h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= @@ -267,19 +409,27 @@ github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.11.0 h1:l4iX0RqNnx/pU7rY2DB/I+znuYY0K3x6Ywac6EIr0PA= +github.com/fatih/color v1.11.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.4.0/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= -github.com/frankban/quicktest v1.4.1 h1:Wv2VwvNn73pAdFIVUQRXYDFp31lXKbqblIXo/Q5GPSg= github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= +github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= +github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk= +github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= +github.com/gammazero/deque v0.0.0-20190130191400-2afb3858e9c7 h1:D2LrfOPgGHQprIxmsTpxtzhpmF66HoM6rXSmcqaX7h8= github.com/gammazero/deque v0.0.0-20190130191400-2afb3858e9c7/go.mod h1:GeIq9qoE43YdGnDXURnmKTnGg15pQz4mYkXSTChbneI= +github.com/gammazero/workerpool v0.0.0-20190406235159-88d534f22b56 h1:VzbudKn/nvxYKOdzgkEBS6SSreRjAgoJ+ZeS4wPFkgc= github.com/gammazero/workerpool v0.0.0-20190406235159-88d534f22b56/go.mod h1:w9RqFVO2BM3xwWEcAB8Fwp0OviTBBEiRmSBDfbXnd3w= github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -290,6 +440,10 @@ github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= 
github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-asn1-ber/asn1-ber v1.4.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-asn1-ber/asn1-ber v1.5.1 h1:pDbRAunXzIUXfx4CB2QJFv5IuPiuoW+sWvr/Us009o8= +github.com/go-asn1-ber/asn1-ber v1.5.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -298,9 +452,17 @@ github.com/go-ini/ini v1.51.1 h1:/QG3cj23k5V8mOl4JnNzUNhc1kr/jzMiNsNuWKcx8gM= github.com/go-ini/ini v1.51.1/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-ldap/ldap v3.0.2+incompatible h1:kD5HQcAzlQ7yrhfn+h+MSABeAy/jAJhvIJ/QDllP44g= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-ldap/ldap/v3 v3.1.3/go.mod h1:3rbOH3jRS2u6jg2rJnKAMLE/xQyCKIveG2Sa/Cohzb8= +github.com/go-ldap/ldap/v3 v3.1.7/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= +github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= +github.com/go-ldap/ldap/v3 v3.2.4 h1:PFavAq2xTgzo/loE8qNXcQaofAaqIpI4WgaLdv+1l3E= +github.com/go-ldap/ldap/v3 v3.2.4/go.mod h1:iYS1MdmrmceOJ1QOTnRXrIs7i3kloqtmGQjRvjKpyMg= +github.com/go-ldap/ldif v0.0.0-20200320164324-fd88d9b715b3 h1:sfz1YppV05y4sYaW7kXZtrocU/+vimnIWt4cxAYh7+o= +github.com/go-ldap/ldif v0.0.0-20200320164324-fd88d9b715b3/go.mod h1:ZXFhGda43Z2TVbfGZefXyMJzsDHhCh0go3bZUcwTx7o= github.com/go-log/log v0.0.0-20181211034820-a514cf01a3eb/go.mod h1:4mBwpdRMFLiuXZDCwU2lKQFsoSCo72j3HqBK9d81N2M= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= @@ -308,16 +470,15 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.2.1/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= -github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU= github.com/go-logr/zapr v0.4.0 h1:uc1uML3hRYL9/ZZPdgHS/n8Nzo+eaYL/Efxkkamf7OM= github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= -github.com/go-ole/go-ole 
v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= +github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= +github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.17.2/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -333,14 +494,12 @@ github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwds github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.17.2/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= @@ -358,37 +517,65 @@ github.com/go-openapi/spec v0.17.2/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsd github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/spec v0.19.5 h1:Xm0Ao53uqnk9QE/LlYV5DEU09UAgpliA85QoT9LzqPw= github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.17.2/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod 
h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/validate v0.17.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= -github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= -github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-test/deep v1.0.7 h1:/VSMRlnY/JSyqxQUzQLKVMAskpY/NZKFA5j2P+0pP2M= +github.com/go-test/deep v1.0.7/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8= +github.com/go-yaml/yaml v2.1.0+incompatible h1:RYi2hDdss1u4YE7GwixGzWwVo47T8UQwnTLB6vQiq+o= github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= +github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= +github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= +github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= +github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= +github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= -github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM= +github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= +github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= +github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= +github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= 
+github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= +github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= +github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= +github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= +github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= +github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= +github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= github.com/gocql/gocql v0.0.0-20190402132108-0e1d5de854df/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0= +github.com/gocql/gocql v0.0.0-20210401103645-80ab1e13e309 h1:8MHuCGYDXh0skFrLumkCMlt9C29hxhqNx39+Haemeqw= +github.com/gocql/gocql v0.0.0-20210401103645-80ab1e13e309/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -399,6 +586,8 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -436,12 +625,15 @@ github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy 
v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/flatbuffers v1.11.0 h1:O7CEyB8Cb3/DmtxODGtLHcEvpr81Jm5qLg/hsHnxA2A= +github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -453,8 +645,12 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-metrics-stackdriver v0.2.0 h1:rbs2sxHAPn2OtUj9JdR/Gij1YKGl0BTVD0augB+HEjE= github.com/google/go-metrics-stackdriver v0.2.0/go.mod h1:KLcPyp3dWJAFD+yHisGlJSZktIsTjb50eB72U2YZ9K0= +github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= @@ -476,19 +672,24 @@ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/goph/emperror v0.17.2 h1:yLapQcmEsO0ipe9p5TaN22djm3OFV/TfM/fcYP0/J18= github.com/goph/emperror v0.17.2/go.mod h1:+ZbQ+fUNO/6FNiUo0ujtMjhgad9Xa6fQL9KhH4LNHic= github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= +github.com/gophercloud/gophercloud v0.1.0 h1:P/nh25+rzXouhytV2pUHBb65fnds26Ghl8/391+sT5o= 
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gopherjs/gopherjs v0.0.0-20180628210949-0892b62f0d9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75 h1:f0n1xnMSmBLzVfsMMvriDyA75NB/oBgILX2GcHXIQzY= github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.0/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= @@ -501,32 +702,45 @@ github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:Fecb github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v0.0.0-20170330212424-2500245aa611/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= +github.com/hashicorp/cap v0.1.0 h1:uBDfu9NDvmotza/mJW6vtQId+VYid9ztlTnDCW6YUWU= +github.com/hashicorp/cap v0.1.0/go.mod h1:VfBvK2ULRyqsuqAnjgZl7HJ7/CGMC7ro4H5eXiZuun8= 
github.com/hashicorp/consul-template v0.25.0/go.mod h1:/vUsrJvDuuQHcxEw0zik+YXTS7ZKWZjQeaQhshBmfH0= +github.com/hashicorp/consul-template v0.26.0/go.mod h1:HoNM2jHenwY2bqNHn5yYoMSAtHEFhbUDHYf1ZwTBOmg= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/api v1.4.0 h1:jfESivXnO5uLdH650JU/6AnjRoHrLhULq0FnC3Kp9EY= github.com/hashicorp/consul/api v1.4.0/go.mod h1:xc8u05kyMa3Wjr9eEAsIAo3dg8+LywT5E/Cl7cNS5nU= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.4.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/consul/sdk v0.4.1-0.20200910203702-bb2b5dd871ca h1:DYR7hPxUqDQP4h3eX9/wI4J2yzL3QEsXi3TCXYtAgGI= +github.com/hashicorp/consul/sdk v0.4.1-0.20200910203702-bb2b5dd871ca/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-bindata v3.0.8-0.20180209072458-bf7910af8997+incompatible/go.mod h1:+IrDq36jUYG0q6TsDY9uO2p77C8f8S5y+RbYHr2UI+U= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-discover v0.0.0-20201029210230-738cb3105cd0 h1:UgODETBAoROFMSSVgg0v8vVpD9Tol8FtYcAeomcWJtY= +github.com/hashicorp/go-discover v0.0.0-20201029210230-738cb3105cd0/go.mod h1:D4eo8/CN92vm9/9UDG+ldX1/fMFa4kpl8qzyTolus8o= github.com/hashicorp/go-gatedio v0.5.0/go.mod h1:Lr3t8L6IyxD3DAeaUxGcgl2JnRUpWMCsmBl4Omu/2t4= github.com/hashicorp/go-gcp-common v0.5.0/go.mod h1:IDGUI2N/OS3PiU4qZcXJeWKPI6O/9Y8hOrbSiMcqyYw= github.com/hashicorp/go-gcp-common v0.6.0/go.mod h1:RuZi18562/z30wxOzpjeRrGcmk9Ro/rBzixaSZDhIhY= +github.com/hashicorp/go-gcp-common v0.7.0 h1:DF2liDG2N71MYt5SN0FJRPdBjxeqx9wfM/PnF7a8Fqk= +github.com/hashicorp/go-gcp-common v0.7.0/go.mod h1:RuZi18562/z30wxOzpjeRrGcmk9Ro/rBzixaSZDhIhY= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= @@ -534,50 +748,66 @@ github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrj github.com/hashicorp/go-hclog v0.10.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.10.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU= 
github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.15.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.16.1 h1:IVQwpTGNRRIHafnTs2dQLIk4ENtneRIEEJWOVDqz99o= +github.com/hashicorp/go-hclog v0.16.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-immutable-radix v1.1.0 h1:vN9wG1D6KG6YHRTWr8512cxGOVgTMEfgEdSj/hr8MPc= github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.0 h1:8exGP7ego3OmkfksihtSouGMZ+hQrhxx+FVELeXpVPE= +github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-kms-wrapping v0.0.0-20191129225826-634facde9f88/go.mod h1:Pm+Umb/6Gij6ZG534L7QDyvkauaOQWGb+arj9aFjCE0= -github.com/hashicorp/go-kms-wrapping v0.5.1 h1:Ed6Z5gV3LY3J9Ora4cwxVmV8Hyt6CPOTrQoGIPry2Ew= github.com/hashicorp/go-kms-wrapping v0.5.1/go.mod h1:cGIibZmMx9qlxS1pZTUrEgGqA+7u3zJyvVYMhjU2bDs= +github.com/hashicorp/go-kms-wrapping v0.5.16 h1:7qvB7JYLFART/bt1wafobMU5dDeyseE3ZBKB6UiyxWs= +github.com/hashicorp/go-kms-wrapping v0.5.16/go.mod h1:lxD7e9q7ZyCtDEP+tnMevsEvw3M0gmZnneAgv8BaO1Q= github.com/hashicorp/go-kms-wrapping/entropy v0.1.0 h1:xuTi5ZwjimfpvpL09jDE71smCBRpnF5xfo871BSX4gs= github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= github.com/hashicorp/go-memdb v1.0.2 h1:AIjzJlwIxz2inhZqRJZfe6D15lPeF0/cZyS1BVlnlHg= github.com/hashicorp/go-memdb v1.0.2/go.mod h1:I6dKdmYhZqU0RJSheVEWgTNWdVQH5QvTgIUQ0t/t32M= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v1.1.5 h1:9byZdVjKTe5mce63pRVNP1L7UAmdHOTEMGehn6KvJWs= +github.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-plugin v1.0.0/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= github.com/hashicorp/go-plugin v1.0.1 h1:4OtAfUGbnKC6yS48p0CtMX2oFYtzFZVv6rok3cRWgnE= github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a h1:FmnBDwGwlTgugDGbVxwV8UavqSMACbGrUpfc98yFLR4= github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a/go.mod h1:xbXnmKqX9/+RhPkJ4zrEx4738HacP72aaUPlT2RZ4sU= +github.com/hashicorp/go-retryablehttp v0.5.2/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= 
github.com/hashicorp/go-retryablehttp v0.6.2/go.mod h1:gEx6HMUGxYYhJScX7W1Il64m6cc2C1mDaW3NQ9sY1FY= -github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.6.7 h1:8/CAEZt/+F7kR7GevNHulKkUjLht3CPmn7egmhieNKo= +github.com/hashicorp/go-retryablehttp v0.6.7/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.1 h1:nd0HIW15E6FG1MsnArYaHfuw9C2zgzM8LxkG5Ty/788= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= +github.com/hashicorp/go-slug v0.4.1 h1:/jAo8dNuLgSImoLXaX7Od7QB4TfYCVPam+OpAt5bZqc= +github.com/hashicorp/go-slug v0.4.1/go.mod h1:I5tq5Lv0E2xcNXNkmx7BSfzi1PsJ2cNjs3cC3LwyhK8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-tfe v0.12.0 h1:teL523WPxwYzL5Gjc2QFxExndrMfWY4BXS2/olVpULM= +github.com/hashicorp/go-tfe v0.12.0/go.mod h1:oT0AG5u/ROzWiw8JZFLDY6FLh6AZnJIG0Ahhvp10txg= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2-0.20191001231223-f32f5fe8d6a8/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.0.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -585,84 +815,182 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0 
h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hcl v1.0.1-vault-3 h1:V95v5KSTu6DB5huDSKiq4uAfILEuNigK/+qPET6H/Mg= +github.com/hashicorp/hcl v1.0.1-vault-3/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/mdns v1.0.1 h1:XFSOubp8KWB+Jd2PDyaX5xUd5bhSP/+pTDZVDMzZJM8= +github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/nomad/api v0.0.0-20191220223628-edc62acd919d h1:BXqsASWhyiAiEVm6FcltF0dg8XvoookQwmpHn8lstu8= github.com/hashicorp/nomad/api v0.0.0-20191220223628-edc62acd919d/go.mod h1:WKCL+tLVhN1D+APwH3JiTRZoxcdwRk86bWu1LVCUPaE= github.com/hashicorp/raft v1.0.1/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcbNAQLkI= -github.com/hashicorp/raft v1.1.2-0.20191002163536-9c6bd3e3eb17 h1:p+2EISNdFCnD9R+B4xCiqSn429MCFtvM41aHJDJ6qW4= +github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= github.com/hashicorp/raft v1.1.2-0.20191002163536-9c6bd3e3eb17/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= +github.com/hashicorp/raft v1.2.0/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= +github.com/hashicorp/raft v1.3.0 h1:Wox4J4R7J2FOJLtTa6hdk0VJfiNUSP32pYoYR738bkE= +github.com/hashicorp/raft v1.3.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= +github.com/hashicorp/raft-autopilot v0.1.3 h1:Y+5jWKTFABJhCrpVwGpGjti2LzwQSzivoqd2wM6JWGw= +github.com/hashicorp/raft-autopilot v0.1.3/go.mod h1:Af4jZBwaNOI+tXfIqIdbcAnh/UyyqIMj/pOISIfhArw= +github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea h1:xykPFhrBAS2J0VBzVa5e80b5ZtYuNQtgXjN40qBZlD4= github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= -github.com/hashicorp/raft-snapshot v1.0.2-0.20190827162939-8117efcc5aab h1:WzGMwlO1DvaC93SvVOBOKtn+nXGEDXapyJuaRV3/VaY= +github.com/hashicorp/raft-boltdb/v2 v2.0.0-20210421194847-a7e34179d62c h1:oiKun9QlrOz5yQxMZJ3tf1kWtFYuKSJzxzEDxDPevj4= +github.com/hashicorp/raft-boltdb/v2 v2.0.0-20210421194847-a7e34179d62c/go.mod h1:kiPs9g148eLShc2TYagUAyKDnD+dH9U+CQKsXzlY9xo= github.com/hashicorp/raft-snapshot v1.0.2-0.20190827162939-8117efcc5aab/go.mod h1:5sL9eUn72lH5DzsFIJ9jaysITbHksSSszImWSOTC8Ic= +github.com/hashicorp/raft-snapshot v1.0.3 h1:lTgBBGMFcuKBTwHqWZ4r0TLzNsqo/OByCga/kM6F0uM= +github.com/hashicorp/raft-snapshot v1.0.3/go.mod h1:5sL9eUn72lH5DzsFIJ9jaysITbHksSSszImWSOTC8Ic= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.8.3/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k= -github.com/hashicorp/vault v1.4.2 h1:KnAPBTb4G7JidQiUXVDk3+LPp+iWPMbMsGmw4POJI4k= +github.com/hashicorp/serf v0.9.4/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= +github.com/hashicorp/serf v0.9.5 h1:EBWvyu9tcRszt3Bxp3KNssBMP1KuHWyO51lz9+786iM= +github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/hashicorp/vault v1.4.2/go.mod 
h1:500fLOj7p92Ys4X265LizqF78MzmHJUf1jV1zNJt060= +github.com/hashicorp/vault v1.8.2 h1:qn67376JJJ01UIu2/ZsMrGEuaIVX7c8snkCBNY4G94Q= +github.com/hashicorp/vault v1.8.2/go.mod h1:24IS2QF/PvpopcEsWYmpglWXvlq4z6RCwI+OltiZ9+w= github.com/hashicorp/vault-plugin-auth-alicloud v0.5.5/go.mod h1:sQ+VNwPQlemgXHXikYH6onfH9gPwDZ1GUVRLz0ZvHx8= +github.com/hashicorp/vault-plugin-auth-alicloud v0.9.0 h1:VN8Tl+SThy/VcMhsQlFvafTh0COpaee52KtfxXdBB9o= +github.com/hashicorp/vault-plugin-auth-alicloud v0.9.0/go.mod h1:lyfBMcULXKJfVu8dNo1w9bUikV5oem5HKmJoWwXupnY= github.com/hashicorp/vault-plugin-auth-azure v0.5.6-0.20200422235613-1b5c70f9ef68/go.mod h1:RCVBsf8AJndh4c6iGZtvVZFui9SG0Bj9fnF0SodNIkw= +github.com/hashicorp/vault-plugin-auth-azure v0.8.0 h1:yy/JQWMq22QXP++3eg2WxUEnrtbu8ZrtIfcR0lD1XrI= +github.com/hashicorp/vault-plugin-auth-azure v0.8.0/go.mod h1:B8T1Xfy4SDWnor9CABIPmGseyBCOsuxJTtloxnDevQM= github.com/hashicorp/vault-plugin-auth-centrify v0.5.5/go.mod h1:GfRoy7NHsuR/ogmZtbExdJXUwbfwcxPrS9xzkyy2J/c= +github.com/hashicorp/vault-plugin-auth-centrify v0.9.0 h1:b8hWM81HU0zbAThs0f3pxCr4SY50ew3xCMBW61QBFQU= +github.com/hashicorp/vault-plugin-auth-centrify v0.9.0/go.mod h1:tLY05v1tC+sfeeE6DF8RAC/MGw4gflomYfA28b4VULw= github.com/hashicorp/vault-plugin-auth-cf v0.5.4/go.mod h1:idkFYHc6ske2BE7fe00SpH+SBIlqDKz8vk/IPLJuX2o= +github.com/hashicorp/vault-plugin-auth-cf v0.9.0 h1:UC9PO+lSB0gLIDnFHEefvG2usGQkYo7XPRu4GSgbk8s= +github.com/hashicorp/vault-plugin-auth-cf v0.9.0/go.mod h1:exPUMj8yNohKM7yRiHa7OfxQmyDI9Pj8+08qB4hGlVw= github.com/hashicorp/vault-plugin-auth-gcp v0.5.1/go.mod h1:eLj92eX8MPI4vY1jaazVLF2sVbSAJ3LRHLRhF/pUmlI= github.com/hashicorp/vault-plugin-auth-gcp v0.6.2-0.20200428223335-82bd3a3ad5b3/go.mod h1:U0fkAlxWTEyQ74lx8wlGdD493lP1DD/qpMjXgOEbwj0= +github.com/hashicorp/vault-plugin-auth-gcp v0.10.0 h1:EBvgbyiPXqmmEQqIwkorLLEjvv4GPl6DQ1LdE0zJkh0= +github.com/hashicorp/vault-plugin-auth-gcp v0.10.0/go.mod h1:Z+mj9fAqzXfDNxLmMoSS8NheVK7ugLvD8sTHO1GXfCA= github.com/hashicorp/vault-plugin-auth-jwt v0.6.2/go.mod h1:SFadxIfoLGzugEjwUUmUaCGbsYEz2/jJymZDDQjEqYg= +github.com/hashicorp/vault-plugin-auth-jwt v0.10.1 h1:7hvGSiICXpmp7Ras5glxVVxTDg2dZL+l/jWeBQ6bzr0= +github.com/hashicorp/vault-plugin-auth-jwt v0.10.1/go.mod h1:3KxfehLIM7zH19+O8jHJ/QJsLGRzSKRqjsesOJmBuoI= github.com/hashicorp/vault-plugin-auth-kerberos v0.1.5/go.mod h1:r4UqWITHYKmBeAMKPWqLo4V8bl/wNqoSIaQcMpeK9ss= +github.com/hashicorp/vault-plugin-auth-kerberos v0.4.0 h1:7M7/DbFsUoOMBd2/R48ZNj4PM3Gdsg0dGcbMOdt5z1Q= +github.com/hashicorp/vault-plugin-auth-kerberos v0.4.0/go.mod h1:h+7pLm4Z2EeKHOGPefX0bGzdUQCMBUlvM/BpSMNgTFw= github.com/hashicorp/vault-plugin-auth-kubernetes v0.6.1/go.mod h1:/Y9W5aZULfPeNVRQK0/nrFGpHWyNm0J3UWhOdsAu0vM= +github.com/hashicorp/vault-plugin-auth-kubernetes v0.10.1 h1:7c2ufXt5oXSUISNHpO07W956fpgn00nT1IQFPEP5XQE= +github.com/hashicorp/vault-plugin-auth-kubernetes v0.10.1/go.mod h1:2c/k3nsoGPKV+zpAWCiajt4e66vncEq8Li/eKLqErAc= github.com/hashicorp/vault-plugin-auth-oci v0.5.4/go.mod h1:j05O2b9fw2Q82NxDPhHMYVfHKvitUYGWfmqmpBdqmmc= +github.com/hashicorp/vault-plugin-auth-oci v0.8.0 h1:qYtVYsQlVnqqlCVqZ+CAiFEXuYJqUQCuqcWQVELybZY= +github.com/hashicorp/vault-plugin-auth-oci v0.8.0/go.mod h1:Cn5cjR279Y+snw8LTaiLTko3KGrbigRbsQPOd2D5xDw= +github.com/hashicorp/vault-plugin-database-couchbase v0.4.1 h1:DSFwDOcmgZ+CSgTh4F5AK7p311QHoT1Jebj/z9PNi6g= +github.com/hashicorp/vault-plugin-database-couchbase v0.4.1/go.mod h1:Seivjno/BOtkqX41d/DDYtTg6zNoxIgNaUVZ3ObZYi4= github.com/hashicorp/vault-plugin-database-elasticsearch v0.5.4/go.mod 
h1:QjGrrxcRXv/4XkEZAlM0VMZEa3uxKAICFqDj27FP/48= +github.com/hashicorp/vault-plugin-database-elasticsearch v0.8.0 h1:c9/fwjJf9XjXSM8WzCKL2fco4jyAudUSM9QIY4hY+5M= +github.com/hashicorp/vault-plugin-database-elasticsearch v0.8.0/go.mod h1:QiQnpM6tI8LqIO+XfI/5AddV7d9cT1DhhOekLV2+AKY= github.com/hashicorp/vault-plugin-database-mongodbatlas v0.1.2-0.20200520204052-f840e9d4895c/go.mod h1:MP3kfr0N+7miOTZFwKv952b9VkXM4S2Q6YtQCiNKWq8= +github.com/hashicorp/vault-plugin-database-mongodbatlas v0.4.0 h1:baCsn+MRffmcqkOf3p6Fh0fvw2llXl63Ts4Fl14Vn3A= +github.com/hashicorp/vault-plugin-database-mongodbatlas v0.4.0/go.mod h1:ESNBxY0kbC8fZhyfYo0JcIwL4piI5+IZAHvnByceRoY= +github.com/hashicorp/vault-plugin-database-snowflake v0.2.1 h1:dEUjdnqWW8JIeGYjgdHRMNqX7cRUDdnXXcBUjw/7YG8= +github.com/hashicorp/vault-plugin-database-snowflake v0.2.1/go.mod h1:aXTJUUIdOVU/g3kiQNVAEcRhK5NzieOcYsUhsK6PgTw= +github.com/hashicorp/vault-plugin-mock v0.16.1 h1:5QQvSUHxDjEEbrd2REOeacqyJnCLPD51IQzy71hx8P0= +github.com/hashicorp/vault-plugin-mock v0.16.1/go.mod h1:83G4JKlOwUtxVourn5euQfze3ZWyXcUiLj2wqrKSDIM= github.com/hashicorp/vault-plugin-secrets-ad v0.6.6-0.20200520202259-fc6b89630f9f/go.mod h1:kk98nB+cwDbt3I7UGQq3ota7+eHZrGSTQZfSRGpluvA= +github.com/hashicorp/vault-plugin-secrets-ad v0.10.0 h1:iMS1SfIQtPfvPbw24W8HbNBb6o6wqSRjJwxNcZWEiw0= +github.com/hashicorp/vault-plugin-secrets-ad v0.10.0/go.mod h1:4AN/0ynq1Krn7LhwzoP/roj9JRdxiuptPpktq7ftLjo= github.com/hashicorp/vault-plugin-secrets-alicloud v0.5.5/go.mod h1:gAoReoUpBHaBwkxQqTK7FY8nQC0MuaZHLiW5WOSny5g= +github.com/hashicorp/vault-plugin-secrets-alicloud v0.9.0 h1:EhTRXoWCjM3suD1atK97R2wWHBr/aacYByRnjzZvFCI= +github.com/hashicorp/vault-plugin-secrets-alicloud v0.9.0/go.mod h1:SSkKpSTOMnX84PfgYiWHgwVg+YMhxHNjo+YCJGNBoZk= github.com/hashicorp/vault-plugin-secrets-azure v0.5.6/go.mod h1:Q0cIL4kZWnMmQWkBfWtyOd7+JXTEpAyU4L932PMHq3E= +github.com/hashicorp/vault-plugin-secrets-azure v0.10.0 h1:pJTWKVHYqfnlB3xg3XnnF9BOpj2/J7LC/e0RgiwkwKI= +github.com/hashicorp/vault-plugin-secrets-azure v0.10.0/go.mod h1:4jCVjTG809NCQ8mrSnbBtX17gX1Iush+558BVO6MJeo= github.com/hashicorp/vault-plugin-secrets-gcp v0.6.2-0.20200507171538-2548e2b5058d/go.mod h1:jVTE1fuhRcBOb/gnCT9W++AnlwiyQEX4S8iVCKhKQsE= +github.com/hashicorp/vault-plugin-secrets-gcp v0.10.2 h1:+DtlYJTsrFRInQpAo09KkYN64scrextjBiTSunpluo8= +github.com/hashicorp/vault-plugin-secrets-gcp v0.10.2/go.mod h1:psRQ/dm5XatoUKLDUeWrpP9icMJNtu/jmscUr37YGK4= github.com/hashicorp/vault-plugin-secrets-gcpkms v0.5.5/go.mod h1:b6RwFD1bny1zbfqhD35iGJdQYHRtJLx3HfBD109GO38= +github.com/hashicorp/vault-plugin-secrets-gcpkms v0.9.0 h1:7a0iWuFA/YNinQ1xXogyZHStolxMVtLV+sy1LpEHaZs= +github.com/hashicorp/vault-plugin-secrets-gcpkms v0.9.0/go.mod h1:hhwps56f2ATeC4Smgghrc5JH9dXR31b4ehSf1HblP5Q= github.com/hashicorp/vault-plugin-secrets-kv v0.5.5/go.mod h1:oNyUoMMQq6uNTwyYPnkldiedaknYbPfQIdKoyKQdy2g= +github.com/hashicorp/vault-plugin-secrets-kv v0.9.0 h1:nCw2IfWw2bWUGFZsNk8BvTEg9k7jDpRn48+VAqjdQ3s= +github.com/hashicorp/vault-plugin-secrets-kv v0.9.0/go.mod h1:B/Cybh5aVF7LNAMHwVBxY8t7r2eL0C6HVGgTyP4nKK4= github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.1.2/go.mod h1:YRW9zn9NZNitRlPYNAWRp/YEdKCF/X8aOg8IYSxFT5Y= +github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.4.0 h1:6ve+7hZmGn7OpML81iZUxYj2AaJptwys323S5XsvVas= +github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.4.0/go.mod h1:4mdgPqlkO+vfFX1cFAWcxkeqz6JAtZgKxL/67q/58Oo= github.com/hashicorp/vault-plugin-secrets-openldap v0.1.3-0.20200518214608-746aba5fead6/go.mod 
h1:9Cy4Jp779BjuIOhYLjEfH3M3QCUxZgPnvJ3tAOOmof4= +github.com/hashicorp/vault-plugin-secrets-openldap v0.5.1 h1:iUJU3D/sA5qNBZnhXI5jFdwoWXMhgb6jeABDLYw631Y= +github.com/hashicorp/vault-plugin-secrets-openldap v0.5.1/go.mod h1:GiFI8Bxwx3+fn0A3SyVp9XdYQhm3cOgN8GzwKxyJ9So= +github.com/hashicorp/vault-plugin-secrets-terraform v0.2.0 h1:U5hT6xUUbIhI12v+tjzmUz47gpzg5yxbdf+q62sIIvc= +github.com/hashicorp/vault-plugin-secrets-terraform v0.2.0/go.mod h1:7r/0t51X/ZtSRh/TjBk7gCm1CUMk50aqLAx811OsGQ8= github.com/hashicorp/vault/api v1.0.1/go.mod h1:AV/+M5VPDpB90arloVX0rVDUIHkONiwz5Uza9HRtpUE= github.com/hashicorp/vault/api v1.0.5-0.20190730042357-746c0b111519/go.mod h1:i9PKqwFko/s/aihU1uuHGh/FaQS+Xcgvd9dvnfAvQb0= github.com/hashicorp/vault/api v1.0.5-0.20191122173911-80fcc7907c78/go.mod h1:Uf8LaHyrYsgVgHzO2tMZKhqRGlL3UJ6XaSwW2EA1Iqo= github.com/hashicorp/vault/api v1.0.5-0.20200215224050-f6547fa8e820/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o= github.com/hashicorp/vault/api v1.0.5-0.20200317185738-82f498082f02/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o= -github.com/hashicorp/vault/api v1.0.5-0.20200902155336-f9d5ce5a171a h1:1DIoo5Mqq4RKFpL2iOmrX7DJIdMLiAt1Tv5f8nMJqRI= +github.com/hashicorp/vault/api v1.0.5-0.20200519221902-385fac77e20f/go.mod h1:euTFbi2YJgwcju3imEt919lhJKF68nN1cQPq3aA+kBE= +github.com/hashicorp/vault/api v1.0.5-0.20200805123347-1ef507638af6/go.mod h1:R3Umvhlxi2TN7Ex2hzOowyeNb+SfbVWI973N+ctaFMk= +github.com/hashicorp/vault/api v1.0.5-0.20200826195146-c03009a7e370/go.mod h1:R3Umvhlxi2TN7Ex2hzOowyeNb+SfbVWI973N+ctaFMk= github.com/hashicorp/vault/api v1.0.5-0.20200902155336-f9d5ce5a171a/go.mod h1:R3Umvhlxi2TN7Ex2hzOowyeNb+SfbVWI973N+ctaFMk= +github.com/hashicorp/vault/api v1.1.1/go.mod h1:29UXcn/1cLOPHQNMWA7bCz2By4PSd0VKPAydKXS5yN0= +github.com/hashicorp/vault/api v1.1.2-0.20210713235431-1fc8af4c041f h1:85dGMkdyO8G5IJP34vX7Y+xdaW9ocXRg6tbtKNIstH8= +github.com/hashicorp/vault/api v1.1.2-0.20210713235431-1fc8af4c041f/go.mod h1:N6fPyoC9nPsXqpQ4ebYIIE0iC25gpWvUoS9dMfZG2BM= github.com/hashicorp/vault/sdk v0.1.8/go.mod h1:tHZfc6St71twLizWNHvnnbiGFo1aq0eD2jGPLtP8kAU= github.com/hashicorp/vault/sdk v0.1.14-0.20190730042320-0dc007d98cc8/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= github.com/hashicorp/vault/sdk v0.1.14-0.20191108161836-82f2b5571044/go.mod h1:PcekaFGiPJyHnFy+NZhP6ll650zEw51Ag7g/YEa+EOU= github.com/hashicorp/vault/sdk v0.1.14-0.20191229212425-c478d00be0d6/go.mod h1:EhK3a4sYnUbANAWxDP4LHf1GvP8DCtISGemfbEGbeo8= github.com/hashicorp/vault/sdk v0.1.14-0.20200215195600-2ca765f0a500/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10= +github.com/hashicorp/vault/sdk v0.1.14-0.20200215224050-f6547fa8e820/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10= github.com/hashicorp/vault/sdk v0.1.14-0.20200305172021-03a3749f220d/go.mod h1:PcekaFGiPJyHnFy+NZhP6ll650zEw51Ag7g/YEa+EOU= github.com/hashicorp/vault/sdk v0.1.14-0.20200317185738-82f498082f02/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10= github.com/hashicorp/vault/sdk v0.1.14-0.20200427170607-03332aaf8d18/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10= github.com/hashicorp/vault/sdk v0.1.14-0.20200429182704-29fce8f27ce4/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10= -github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267 h1:e1ok06zGrWJW91rzRroyl5nRNqraaBe4d5hiKcVZuHM= +github.com/hashicorp/vault/sdk v0.1.14-0.20200519221530-14615acda45f/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10= github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267/go.mod 
h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10= +github.com/hashicorp/vault/sdk v0.1.14-0.20200527182800-ad90e0b39d2f/go.mod h1:B2Cbv/tzj8btUA5FF4SvYclTujJhlWU6siK4vo8tgXM= +github.com/hashicorp/vault/sdk v0.1.14-0.20200916184745-5576096032f8/go.mod h1:7GBJyKruotYxJlye8yHyGICV7kN7dQCNsCMTrb+v5J0= +github.com/hashicorp/vault/sdk v0.1.14-0.20210106220500-0ddc32f2ab8a/go.mod h1:cAGI4nVnEfAyMeqt9oB+Mase8DNn3qA/LDNHURiwssY= +github.com/hashicorp/vault/sdk v0.1.14-0.20210127185906-6b455835fa8c/go.mod h1:cAGI4nVnEfAyMeqt9oB+Mase8DNn3qA/LDNHURiwssY= +github.com/hashicorp/vault/sdk v0.1.14-0.20210204230556-cf85a862b7c6/go.mod h1:cAGI4nVnEfAyMeqt9oB+Mase8DNn3qA/LDNHURiwssY= +github.com/hashicorp/vault/sdk v0.2.0/go.mod h1:cAGI4nVnEfAyMeqt9oB+Mase8DNn3qA/LDNHURiwssY= +github.com/hashicorp/vault/sdk v0.2.1/go.mod h1:WfUiO1vYzfBkz1TmoE4ZGU7HD0T0Cl/rZwaxjBkgN4U= +github.com/hashicorp/vault/sdk v0.2.2-0.20210825150427-9b1f4d486f5d h1:LNFw41cY/UKzhWj6ZOSwmJkwHlCzWL+eZ2G1iHumkXQ= +github.com/hashicorp/vault/sdk v0.2.2-0.20210825150427-9b1f4d486f5d/go.mod h1:NSB/8AGzKoCBMOCTOLwT/kQI3G5Hf+Wdkmz+orcwPO0= +github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443 h1:O/pT5C1Q3mVXMyuqg7yuAWUg/jMZR1/0QTzTRdNR6Uw= +github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443/go.mod h1:bEpDU35nTu0ey1EXjwNwPjI9xErAsoOCmcMb9GKvyxo= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= +github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huaweicloud/golangsdk v0.0.0-20200304081349-45ec0797f2a4/go.mod h1:WQBcHRNX9shz3928lWEvstQJtAtYI7ks6XlgtRT9Tcw= +github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/influxdb v0.0.0-20190411212539-d24b7ba8c4c4 h1:3K3KcD4S6/Y2hevi70EzUTNKOS3cryQyhUnkjE6Tz0w= github.com/influxdata/influxdb v0.0.0-20190411212539-d24b7ba8c4c4/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/jackc/fake 
v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= github.com/jackc/pgx v3.3.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da/go.mod h1:ks+b9deReOc7jgqp+e7LuFiCBH6Rm5hL32cLcEAArb4= +github.com/jarcoal/httpmock v1.0.4/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= +github.com/jarcoal/httpmock v1.0.5 h1:cHtVEcTxRSX4J0je7mWPfc9BpDpqzXSJ5HbymZmyHck= +github.com/jarcoal/httpmock v1.0.5/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= +github.com/jcmturner/aescts v1.0.1 h1:5jhUSHbHSZjQeWFY//Lv8dpP/O3sMDOxrGV/IfCqh44= github.com/jcmturner/aescts v1.0.1/go.mod h1:k9gJoDUf1GH5r2IBtBjwjDCoLELYxOcEhitdP8RL7qQ= +github.com/jcmturner/dnsutils v1.0.1 h1:zkF8SbVatbr5LGrvcPSes62SV68lASVv6+x9wo2De+w= github.com/jcmturner/dnsutils v1.0.1/go.mod h1:tqMo38L01jO8AKxT0S9OQVlGZu3dkEt+z5CA+LOhwB0= +github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= +github.com/jcmturner/gokrb5/v8 v8.0.0 h1:jNMtRRdNeZDFUNUX+ifpDcQzPS9nZlZH47JNyGIzdeE= github.com/jcmturner/gokrb5/v8 v8.0.0/go.mod h1:4/sqKY8Yzo/TIQ8MoCyk/EPcjb+czI9czxHcdXuZbFA= +github.com/jcmturner/rpc/v2 v2.0.2 h1:gMB4IwRXYsWw4Bc6o/az2HJgFUA1ffSh90i26ZJ6Xl0= github.com/jcmturner/rpc/v2 v2.0.2/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= +github.com/jeffchao/backoff v0.0.0-20140404060208-9d7fd7aa17f2 h1:mex1izRBCD+7WjieGgRdy7e651vD/lvB1bD9vNE/3K4= github.com/jeffchao/backoff v0.0.0-20140404060208-9d7fd7aa17f2/go.mod h1:xkfESuHriIekR+4RoV+fu91j/CfnYM29Zi2tMFw5iD4= github.com/jefferai/isbadcipher v0.0.0-20190226160619-51d2077c035f h1:E87tDTVS5W65euzixn7clSzK66puSt1H4I5SC0EmHH4= github.com/jefferai/isbadcipher v0.0.0-20190226160619-51d2077c035f/go.mod h1:3J2qVK16Lq8V+wfiL2lPeDZ7UWMxk5LemerHa1p6N00= @@ -670,14 +998,18 @@ github.com/jefferai/jsonx v1.0.0 h1:Xoz0ZbmkpBvED5W9W1B5B/zc3Oiq7oXqiW7iRV3B6EI= github.com/jefferai/jsonx v1.0.0/go.mod h1:OGmqmi2tTeI/PS+qQfBDToLHHJIy/RMp24fPo8vFvoQ= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.0.0-20141017032234-72f9bd7c4e0c/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/joyent/triton-go v0.0.0-20180628001255-830d2b111e62/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA= github.com/joyent/triton-go 
v0.0.0-20190112182421-51ffac552869/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA= +github.com/joyent/triton-go v1.7.1-0.20200416154420-6801d15b779f h1:ENpDacvnr8faw5ugQmEF1QYk+f/Y9lXFvuYmRxykago= +github.com/joyent/triton-go v1.7.1-0.20200416154420-6801d15b779f/go.mod h1:KDSfL7qe5ZfQqvlDMkVjCztbmcpp/c8M77vhQP8ZPvk= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -699,7 +1031,11 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8 github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.1.0 h1:IwEFm6n6dvFAqpi3BtcTgnjwM/oj9hA30ZV7d4I0FGU= github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.1.0/go.mod h1:+1DpV8uIwteAhxNO0lgRox8gHkTG6w3OeDfAlg+qqjA= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= +github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/kelseyhightower/envconfig v1.3.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= +github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= +github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= github.com/keybase/go-crypto v0.0.0-20190403132359-d65b6b94177f h1:Gsc9mVHLRqBjMgdQCghN9NObCcRncDqxJvBvEaIIQEo= github.com/keybase/go-crypto v0.0.0-20190403132359-d65b6b94177f/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= @@ -707,31 +1043,41 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.9.5 h1:U+CaK85mrNNb4k8BNOfgJtJ/gr6kswUCFj6miSzVC6M= +github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod 
h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kube-object-storage/lib-bucket-provisioner v0.0.0-20210311161930-4bea5edaff58 h1:O9m6tfyhjr3F3yKGsgGMp1+seFyTRovmGMmbcNp6GSo= -github.com/kube-object-storage/lib-bucket-provisioner v0.0.0-20210311161930-4bea5edaff58/go.mod h1:XpQ9HGG9uF5aJCBP+s6w5kSiyTIVSqCV8+XAE4qms5E= -github.com/kubernetes-csi/csi-lib-utils v0.9.1 h1:sGq6ifVujfMSkfTsMZip44Ttv8SDXvsBlFk9GdYl/b8= -github.com/kubernetes-csi/csi-lib-utils v0.9.1/go.mod h1:8E2jVUX9j3QgspwHXa6LwyN7IHQDjW9jX3kwoWnSC+M= +github.com/kube-object-storage/lib-bucket-provisioner v0.0.0-20210818162813-3eee31c01875 h1:jX3VXgmNOye8XYKjwcTVXcBYcPv3jj657fwX8DN/HiM= +github.com/kube-object-storage/lib-bucket-provisioner v0.0.0-20210818162813-3eee31c01875/go.mod h1:XpQ9HGG9uF5aJCBP+s6w5kSiyTIVSqCV8+XAE4qms5E= github.com/kubernetes-csi/external-snapshotter/client/v4 v4.0.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys= -github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0= +github.com/lestrrat-go/jwx v0.9.0/go.mod h1:iEoxlYfZjvoGpuWwxUz+eR5e6KTJGsaRcy/YNA/UnBk= +github.com/lib/pq v0.0.0-20180327071824-d34b9ff171c2/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.8.0 h1:9xohqzkUwzR4Ga4ivdTcawVS89YSDVxXMa3xJX3cGzg= +github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libopenstorage/autopilot-api v0.6.1-0.20210128210103-5fbb67948648/go.mod h1:6JLrPbR3ZJQFbUY/+QJMl/aF00YdIrLf8/GWAplgvJs= github.com/libopenstorage/openstorage v8.0.0+incompatible/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc= github.com/libopenstorage/operator v0.0.0-20200725001727-48d03e197117/go.mod h1:Qh+VXOB6hj60VmlgsmY+R1w+dFuHK246UueM4SAqZG0= github.com/libopenstorage/secrets v0.0.0-20210709082113-dde442ea20ec h1:ezv9ybzCRb86E8aMgG7/GcNSRU/72D0BVEhkNjnCEz8= github.com/libopenstorage/secrets v0.0.0-20210709082113-dde442ea20ec/go.mod h1:gE8rSd6lwLNXNbiW3DrRZjFMs+y4fDHy/6uiOO9cdzY= github.com/libopenstorage/stork v1.3.0-beta1.0.20200630005842-9255e7a98775/go.mod h1:qBSzYTJVHlOMg5RINNiHD1kBzlasnrc2uKLPZLgu1Qs= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/linode/linodego v0.7.1 h1:4WZmMpSA2NRwlPZcc0+4Gyn7rr99Evk9bnr0B3gXRKE= +github.com/linode/linodego v0.7.1/go.mod h1:ga11n3ivecUrPCHN0rANxKmfWBJVkOXfLMZinAbj2sY= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -739,14 +1085,19 @@ github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod 
h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/markbates/pkger v0.17.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= +github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11/go.mod h1:Ah2dBMoxZEqk118as2T4u4fjfXarE0pPnMJaArZQZsI= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI= +github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= @@ -755,41 +1106,56 @@ github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOA github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mholt/archiver v3.1.1+incompatible/go.mod h1:Dh2dOXnSdiLxRiPoVfIr/fI1TwETms9B8CTWfeh7ROU= +github.com/michaelklishin/rabbit-hole v0.0.0-20191008194146-93d9988f0cd5 h1:uA3b4GgZMZxAJsTkd+CVQ85b7KBlD7HLpd/FfTNlGN0= github.com/michaelklishin/rabbit-hole v0.0.0-20191008194146-93d9988f0cd5/go.mod h1:+pmbihVqjC3GPdfWv1V2TnRSuVvwrWLKfEP/MZVB/Wc= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.29 
h1:xHBEhR+t5RzcFJjBLJlax2daXOrTYtr9z4WdKEfWFzg= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.29/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/mitchellh/cli v1.0.0 h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y= +github.com/miekg/dns v1.1.40 h1:pyyPFfGMnciYUk/mXpKkVmeMQjfXqt3FAJ2hy7tPiLA= +github.com/miekg/dns v1.1.40/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/cli v1.1.2 h1:PvH+lL2B7IQ101xQL63Of8yFS2y+aDlsFcsqNc+u/Kw= +github.com/mitchellh/cli v1.1.2/go.mod h1:6iaV0fGdElS6dPBx0EApTxHrcWvmJphyh2n8YBLPPZ4= github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.14.0 h1:/x0XQ6h+3U3nAyk1yx+bHPURrKa9sVVvYbuqZ7pIAtI= +github.com/mitchellh/go-testing-interface v1.14.0/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/gox v1.0.1/go.mod h1:ED6BioOGXMswlXa2zxfh/xdd5QhwYliBFn9V18Ap4z4= github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.3.2 h1:mRS76wmkOn3KkKAyXDu42V+6ebnXWIztFSYGN7GeoRg= +github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8h+eeNA8= +github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/pointerstructure v0.0.0-20190430161007-f252a8fd71c8/go.mod h1:k4XwG94++jLVsSiTxo7qdIfXA9pj9EAeo0QsNNJOLZ8= +github.com/mitchellh/pointerstructure v1.0.0 h1:ATSdz4NWrmWPOF1CeCBU4sMCno2hgqdbSrRPFWQSVZI= +github.com/mitchellh/pointerstructure v1.0.0/go.mod h1:k4XwG94++jLVsSiTxo7qdIfXA9pj9EAeo0QsNNJOLZ8= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= 
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2/go.mod h1:TjQg8pa4iejrUrjiz0MCtMV38jdMNW4doKSiBrEvCQQ= +github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= @@ -800,9 +1166,9 @@ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/mongodb/go-client-mongodb-atlas v0.1.2/go.mod h1:LS8O0YLkA+sbtOb3fZLF10yY3tJM+1xATXMJ3oU35LU= -github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwielbut/pointy v1.1.0/go.mod h1:MvvO+uMFj9T5DMda33HlvogsFBX7pWWKAkFIn4teYwY= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= @@ -810,28 +1176,43 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/natefinch/atomic v0.0.0-20150920032501-a62ce929ffcc/go.mod h1:1rLVY/DWf3U6vSZgH16S7pymfrhK2lcUlXjgGglw/lY= github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 h1:BQ1HW7hr4IVovMwWg0E0PYcyW8CzqDcVmaew9cujU4s= 
+github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2/go.mod h1:TLb2Sg7HQcgGdloNxkrmtgDNR9uVYF3lfdFIN4Ro6Sk= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nwaples/rardecode v1.0.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= +github.com/nwaples/rardecode v1.1.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/okta/okta-sdk-golang v1.0.1/go.mod h1:8k//sN2mFTq8Ayo90DqGbcumCkSmYjF0+2zkIbZysec= +github.com/okta/okta-sdk-golang v1.1.0 h1:sr/KYSMRhs4F2NWEbqWXqN4y4cKKcfzrtOiBqR/J6mI= +github.com/okta/okta-sdk-golang v1.1.0/go.mod h1:KEjmr3Zo+wP3gVa3XhwIvENBfh7L/iRUeIl6ruQYOK0= +github.com/okta/okta-sdk-golang/v2 v2.0.0 h1:qwl5Ezpy5a3I2WphiHolpgTtOC+YMTDIpFqOHmfiAGs= +github.com/okta/okta-sdk-golang/v2 v2.0.0/go.mod h1:fQubbeV8gksr8e1pmRVSE8kIj1TFqlgYqi8WsvSKmQk= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.0-20180130162743-b8a9be070da4/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= @@ -843,12 +1224,23 @@ github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak= -github.com/onsi/gomega 
v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= +github.com/onsi/gomega v1.14.0 h1:ep6kpPVwmr/nTbklSx2nrLNSIO62DoYAhnPNIMhK8gI= +github.com/onsi/gomega v1.14.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc9 h1:/k06BMULKF5hidyoZymkoDCzdJzltZpz/UU4LguQVtc= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/openlyinc/pointy v1.1.2 h1:LywVV2BWC5Sp5v7FoP4bUD+2Yn5k0VNeRbU5vq9jUMY= +github.com/openlyinc/pointy v1.1.2/go.mod h1:w2Sytx+0FVuMKn37xpXIAyBNhFNBIJGR/v2m7ik1WtM= github.com/openshift/api v0.0.0-20210105115604-44119421ec6b/go.mod h1:aqU5Cq+kqKKPbDMqxo9FojgDeSpNJI7iuskjXjtojDg= github.com/openshift/build-machinery-go v0.0.0-20200917070002-f171684f77ab/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= github.com/openshift/client-go v0.0.0-20210112165513-ebc401615f47/go.mod h1:u7NRAjtYVAKokiI9LouzTv4mhds8P4S1TwdVAfbjKSk= @@ -856,32 +1248,55 @@ github.com/openshift/cluster-api v0.0.0-20191129101638-b09907ac6668 h1:IDZyg/Kye github.com/openshift/cluster-api v0.0.0-20191129101638-b09907ac6668/go.mod h1:T18COkr6nLh9RyZKPMP7YjnwBME7RX8P2ar1SQbBltM= github.com/openshift/machine-api-operator v0.2.1-0.20190903202259-474e14e4965a h1:mcl6pEpG0ZKeMnAMhtmcoy7jFY8PcMRHmxdRQmowxo4= github.com/openshift/machine-api-operator v0.2.1-0.20190903202259-474e14e4965a/go.mod h1:7HeAh0v04zQn1L+4ItUjvpBQYsm2Nf81WaZLiXTcnkc= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= 
github.com/oracle/oci-go-sdk v7.0.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888= +github.com/oracle/oci-go-sdk v12.5.0+incompatible h1:pr08ECoaDKHWO9tnzJB1YqClEs7ZK1CFOez2DQocH14= github.com/oracle/oci-go-sdk v12.5.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888= github.com/ory/dockertest v3.3.4+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= +github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= +github.com/ory/dockertest/v3 v3.6.2 h1:Q3Y8naCMyC1Nw91BHum1bGyEsNQc/UOIYS3ZoPoou0g= +github.com/ory/dockertest/v3 v3.6.2/go.mod h1:EFLcVUOl8qCwp9NyDAcCDtq/QviLtYswW/VbWzUnTNE= github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOThbttwfYRNFOWLLVXMhk5Lkio4GGOtw5UrxS0= +github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c h1:vwpFWvAO8DeIZfFeqASzZfsxuWPno9ncAebBEP0N3uE= +github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c/go.mod h1:otzZQXgoO96RTzDB/Hycg0qZcXZsWJGJRSXbmEIJ+4M= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/patrickmn/go-cache v0.0.0-20180815053127-5633e0862627/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= -github.com/pborman/uuid v0.0.0-20180906182336-adf5a7427709/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= -github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= +github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.2.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4 v2.2.6+incompatible h1:6aCX4/YZ9v8q69hTyiR7dNLnTA3fgtKHVVW5BCd5Znw= github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= +github.com/pierrec/lz4 v2.5.2+incompatible/go.mod 
h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4 h1:49lOXmGaUpV9Fz3gd7TFZY106KVlPVa5jcYD1gaQf98= +github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -890,10 +1305,14 @@ github.com/portworx/kvdb v0.0.0-20200929023115-b312c7519467/go.mod h1:Q8YyrNDvPp github.com/portworx/sched-ops v0.20.4-openstorage-rc3/go.mod h1:DpRDDqXWQrReFJ5SHWWrURuZdzVKjrh2OxbAfwnrAyk= github.com/portworx/talisman v0.0.0-20191007232806-837747f38224/go.mod h1:OjpMH9Uh5o9ntVGktm4FbjLNwubJ3ITih2OfYrAeWtA= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/posener/complete v1.2.1 h1:LrvDIY//XNo65Lq84G/akBuMGlawHvGBABv8f/ZN6DI= github.com/posener/complete v1.2.1/go.mod h1:6gapUrK/U1TAN7ciCoNRIdVC5sbdBTUh1DKN0g6uH7E= +github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/pquerna/cachecontrol v0.0.0-20201205024021-ac21108117ac h1:jWKYCNlX4J5s8M0nHYkh7Y7c9gRVDEb3mq51j5J0F5M= +github.com/pquerna/cachecontrol v0.0.0-20201205024021-ac21108117ac/go.mod h1:hoLfEwdY11HjRfKFH6KqnPsfxlo3BP6bJehpDv8t6sQ= +github.com/pquerna/otp v1.2.1-0.20191009055518-468c2dd2b58d h1:PinQItctnaL2LtkaSM678+ZLLy5TajwOeXzWvYC7tII= github.com/pquerna/otp v1.2.1-0.20191009055518-468c2dd2b58d/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.44.1/go.mod h1:3WYi4xqXxGGXWDdQIITnLNmuDzO5n6wYva9spVhR4fg= github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.46.0 h1:J+aQlaDVIemgZDR1f/48MBaiA7rDTm6OyKSRhDX2ZTY= @@ -906,6 +1325,7 @@ github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4 github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.5.1/go.mod 
h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= @@ -915,6 +1335,7 @@ github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1: github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= @@ -924,10 +1345,13 @@ github.com/prometheus/common v0.1.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.11.1/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -940,13 +1364,21 @@ github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rboyer/safeio v0.2.1 h1:05xhhdRNAdS3apYm7JRjOqngf4xruaW959jmRxGDuSU= +github.com/rboyer/safeio v0.2.1/go.mod h1:Cq/cEPK+YXFn622lsQ0K4KsPZSPtaptHHEldsy7Fmig= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= +github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03 h1:Wdi9nwnhFNAlseAOekn6B5G/+GMtks9UKbvRU/CMM/o= +github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03/go.mod h1:gRAiPF5C5Nd0eyyRdqIu9qTiFSoZzpTq727b5B8fkkU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= 
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rollbar/rollbar-go v1.0.2/go.mod h1:AcFs5f0I+c71bpHlXNNDbOWJiKwjFDtISeXco0L5PKQ= +github.com/rs/zerolog v1.4.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -954,17 +1386,27 @@ github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFo github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sasha-s/go-deadlock v0.2.0 h1:lMqc+fUb7RrFS3gQLtoQsJ7/6TV/pAIFvBsqX73DK8Y= +github.com/sasha-s/go-deadlock v0.2.0/go.mod h1:StQn567HiB1fF2yJ44N9au7wOhrPS3iZqiDbRupzT10= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sean-/conswriter v0.0.0-20180208195008-f5ae3917a627/go.mod h1:7zjs06qF79/FKAJpBvFx3P8Ww4UTIMAe+lpNXDHziac= +github.com/sean-/pager v0.0.0-20180208200047-666be9bf53b5/go.mod h1:BeybITEsBEg6qbIiqJ6/Bqeq25bCLbL7YFmpaFfJDuM= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shirou/gopsutil v2.19.9+incompatible h1:IrPVlK4nfwW10DF7pW+7YJKws9NkgNzWozwwWv9FsgY= +github.com/sethvargo/go-limiter v0.3.0 h1:yRMc+Qs2yqw6YJp6UxrO2iUs6DOSq4zcnljbB7/rMns= +github.com/sethvargo/go-limiter v0.3.0/go.mod h1:C0kbSFbiriE5k2FFOe18M1YZbAR2Fiwf72uGu0CXCcU= github.com/shirou/gopsutil v2.19.9+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 h1:udFKJ0aHUL60LboW/A+DfgoHVedieIzIXE8uylPue0U= +github.com/shirou/gopsutil v3.21.5+incompatible h1:OloQyEerMi7JUrXiNzy8wQ5XN+baemxSl12QgIzt0jc= +github.com/shirou/gopsutil v3.21.5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus 
v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= @@ -977,22 +1419,27 @@ github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:X github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/snowflakedb/gosnowflake v1.6.1 h1:gaRt3oK7ATFmLgAg6Gw7aKvWhWts3WV33d0YE4Ofh2U= +github.com/snowflakedb/gosnowflake v1.6.1/go.mod h1:1kyg2XEduwti88V11PKRHImhXLK5WpGiayY6lFNYb98= +github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d h1:bVQRCxQvfjNUeRqaY/uT0tFuvuFY0ulgnczuR684Xic= +github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d/go.mod h1:Cw4GTlQccdRGSEf6KiMju767x0NEHE0YIVPJSaXjlsw= github.com/soheilhy/cmux v0.1.3/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= +github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.0-20180319062004-c439c4fa0937/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod 
h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -1001,11 +1448,17 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/square/go-jose v2.4.1+incompatible/go.mod h1:7MxpAF/1WTVUu8Am+T5kNy+t0902CaLWM4Z745MkOa8= +github.com/square/go-jose/v3 v3.0.0-20200225220504-708a9fe87ddc/go.mod h1:JbpHhNyeVc538vtj/ECJ3gPYm1VEitNjsLhm4eJQQbg= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -1014,57 +1467,88 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d h1:Z4EH+5EffvBEhh37F0C0DnpklTMh00JOkjW5zK3ofBI= +github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d/go.mod h1:BSTlc8jOjh0niykqEGVXOLXdi9o0r0kR8tCYiMvjFgw= +github.com/tencentcloud/tencentcloud-sdk-go v3.0.83+incompatible/go.mod h1:0PfYow01SHPMhKY31xa+EFz2RStxIqj6JFAJS+IkCi4= +github.com/tencentcloud/tencentcloud-sdk-go v3.0.171+incompatible h1:K3fcS92NS8cRntIdu8Uqy2ZSePvX73nNhOkKuPGJLXQ= +github.com/tencentcloud/tencentcloud-sdk-go v3.0.171+incompatible/go.mod h1:0PfYow01SHPMhKY31xa+EFz2RStxIqj6JFAJS+IkCi4= github.com/tevino/abool v1.2.0 h1:heAkClL8H6w+mK5md9dzsuohKeXHUpY7Vw0ZCKW+huA= github.com/tevino/abool v1.2.0/go.mod h1:qc66Pna1RiIsPa7O4Egxxs9OqkuxDX55zznh9K07Tzg= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.0.1/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tklauser/go-sysconf v0.3.6 h1:oc1sJWvKkmvIxhDHeKWvZS4f6AW+YcoguSfRF2/Hmo4= +github.com/tklauser/go-sysconf v0.3.6/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= +github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefldA= +github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod 
h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= +github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +github.com/vmware/govmomi v0.18.0 h1:f7QxSmP7meCtoAmiKZogvVbLInT+CZx6Px6K5rYsJZo= +github.com/vmware/govmomi v0.18.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yanniszark/go-nodetool v0.0.0-20191206125106-cd8f91fa16be h1:e8XjnroTyruokenelQLRje3D3nbti3ol45daXg5iWUA= -github.com/yanniszark/go-nodetool v0.0.0-20191206125106-cd8f91fa16be/go.mod h1:8e/E6xP+Hyo+dJy51hlGEbJkiYl0fEzvlQdqAEcg1oQ= +github.com/yandex-cloud/go-genproto v0.0.0-20200722140432-762fe965ce77/go.mod h1:HEUYX/p8966tMUHHT+TsS0hF/Ca/NYwqprC5WXSDMfE= +github.com/yandex-cloud/go-sdk v0.0.0-20200722140627-2194e5077f13/go.mod h1:LEdAMqa1v/7KYe4b13ALLkonuDxLph57ibUb50ctvJk= +github.com/yhat/scrape v0.0.0-20161128144610-24b7890b0945/go.mod h1:4vRFPPNYllgCacoj+0FoKOjTW68rUhEfqPLiEJaK2w8= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/gopher-lua v0.0.0-20200816102855-ee81675732da/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= -go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489 h1:1JFLBqwIgdyHN1ZtgjTBwO+blA6gVOmZurpiMEsETKo= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200425165423-262c93980547/go.mod h1:YoUyTScD3Vcv2RBm3eGVOq7i1ULiz3OuXoQFWOirmAM= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.mongodb.org/atlas v0.7.1 h1:hNBtwtKgmhB9vmSX/JyN/cArmhzyy4ihKpmXSMIc4mw= +go.mongodb.org/atlas v0.7.1/go.mod h1:CIaBeO8GLHhtYLw7xSSXsw7N90Z4MFY87Oy9qcPyuEs= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.2.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.4.2/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= +go.mongodb.org/mongo-driver v1.4.6 h1:rh7GdYmDrb8AQSkF8yteAus8qYOgOASWDOv1BWqBXkU= +go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A= go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= +go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= +go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= +go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= +go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= +go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod 
h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -1072,17 +1556,19 @@ go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= -go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go.uber.org/zap v1.18.1 h1:CSUJ2mjFszzEWt4CdKISEuChVIXGBn3lAPwkRGyVrc4= +go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1093,21 +1579,30 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a h1:kr2P4QFmQr29mSLA43kwrOcgcReGTfbE9N577tCTuBc= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1142,6 +1637,7 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1171,15 +1667,19 @@ golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net 
v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200320220750-118fecf932d8/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200505041828-1ed23360d12c/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -1188,12 +1688,15 @@ golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201002202402-0a1ea396d57c/go.mod h1:iQL9McJNjoIa5mjH6nYTCTZXUN6RP+XW3eib7Ya3XcI= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210510120150-4163338589ed h1:p9UgmWI9wKpfYmgaV/IZKGdXc5qEK45tDwwwDyjS26I= +golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1209,13 +1712,15 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1227,6 +1732,7 @@ golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1234,10 +1740,14 @@ golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1247,19 +1757,23 @@ golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1268,28 +1782,36 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200409092240-59c9f1ba88fa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501145240-bc7a7d42d5c3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20210603081109-ebe580a85c40 h1:JWgyZ1qgdTaF3N3oxC+MdTV7qvEEgHo3otj+HB5CM7Q= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= @@ -1312,8 +1834,9 @@ golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1326,9 +1849,14 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190424220101-1e8e1cfdf96b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools 
v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -1337,6 +1865,7 @@ golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190718200317-82a3ea8a504c/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190930201159-7c411dea38b0/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1353,6 +1882,7 @@ golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -1364,10 +1894,13 @@ golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200409170454-77362c5149f0/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200416214402-fc959738d646/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200521155704-91d71f6c2f04/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -1375,6 +1908,7 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod 
h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= @@ -1384,7 +1918,6 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= -gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= @@ -1407,12 +1940,15 @@ google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.21.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0 h1:yfrXXP61wVuLb0vBcG6qaOoIoqYEzOQS8jum51jkv2w= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1432,6 +1968,7 @@ google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190513181449-d00d292a067c/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto 
v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= @@ -1447,7 +1984,10 @@ google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200323114720-3f67cca34472/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200409111301-baae70f3302d/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200416231807-8751e049a2a0/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -1475,6 +2015,7 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1485,6 +2026,8 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/h2non/gock.v1 v1.0.15/go.mod h1:sX4zAkdYX1TRGJ2JY156cFspQn4yRWn6p9EMdODlynE= gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= @@ -1493,12 +2036,15 @@ gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww= gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI= gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= 
gopkg.in/ldap.v3 v3.0.3/go.mod h1:oxD7NyBuxchC+SgJDE1Q5Od05eGt29SDQVBmV+HYbzw= +gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/natefinch/lumberjack.v2 v2.0.0-20150622162204-20b71e5b60d7/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/ory-am/dockertest.v3 v3.3.4/go.mod h1:s9mmoLkaGeAh97qygnNj4xWkiN7e1SKekYC6CovU+ek= +gopkg.in/resty.v1 v1.12.0 h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.0.0-20180411045311-89060dee6a84/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= @@ -1509,6 +2055,7 @@ gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1541,6 +2088,7 @@ k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j k8s.io/api v0.0.0-20190409092523-d687e77c8ae9/go.mod h1:FQEUn50aaytlU65qqBn/w+5ugllHwrBzKm7DzbnXdzE= k8s.io/api v0.0.0-20190918155943-95b840bb6a1f/go.mod h1:uWuOHnjmNrtQomJrvEBg0c0HRNyQ+8KTEERVsK0PW48= k8s.io/api v0.15.7/go.mod h1:a/tUxscL+UxvYyA7Tj5DRc8ivYqJIO1Y5KDdlI6wSvo= +k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA= k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw= k8s.io/api v0.19.1/go.mod h1:+u/k4/K/7vp4vsfdT7dyl8Oxk1F26Md4g5F26Tu85PU= @@ -1550,21 +2098,20 @@ k8s.io/api v0.20.0/go.mod h1:HyLC5l5eoS/ygQYl1BXBgFzWNlkHiAuyNAbevIn+FKg= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s= -k8s.io/api v0.21.2 h1:vz7DqmRsXTCSa6pNxXwQ1IYeAZgdIsua+DZU+o+SX3Y= -k8s.io/api v0.21.2/go.mod h1:Lv6UGJZ1rlMI1qusN8ruAp9PUBFyBwpEHAdG24vIsiU= +k8s.io/api v0.21.3 h1:cblWILbLO8ar+Fj6xdDGr603HRsf8Wu9E9rngJeprZQ= +k8s.io/api v0.21.3/go.mod h1:hUgeYHUbBp23Ue4qdX9tR8/ANi/g3ehylAqDn9NWVOg= k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE= k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY= k8s.io/apiextensions-apiserver v0.15.7/go.mod h1:ctb/NYtsiBt6CGN42Z+JrOkxi9nJYaKZYmatJ6SUy0Y= k8s.io/apiextensions-apiserver v0.18.3/go.mod h1:TMsNGs7DYpMXd+8MOCX8KzPOCx8fnZMoIGB24m03+JE= -k8s.io/apiextensions-apiserver v0.19.2/go.mod h1:EYNjpqIAvNZe+svXVx9j4uBaVhTB4C94HkY3w058qcg= 
k8s.io/apiextensions-apiserver v0.20.1/go.mod h1:ntnrZV+6a3dB504qwC5PN/Yg9PBiDNt1EVqbW2kORVk= -k8s.io/apiextensions-apiserver v0.21.1 h1:AA+cnsb6w7SZ1vD32Z+zdgfXdXY8X9uGX5bN6EoPEIo= -k8s.io/apiextensions-apiserver v0.21.1/go.mod h1:KESQFCGjqVcVsZ9g0xX5bacMjyX5emuWcS2arzdEouA= -k8s.io/apimachinery v0.0.0-20181116115711-1b0702fe2927/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= +k8s.io/apiextensions-apiserver v0.21.3 h1:+B6biyUWpqt41kz5x6peIsljlsuwvNAp/oFax/j2/aY= +k8s.io/apiextensions-apiserver v0.21.3/go.mod h1:kl6dap3Gd45+21Jnh6utCx8Z2xxLm8LGDkprcd+KbsE= k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= k8s.io/apimachinery v0.0.0-20190409092423-760d1845f48b/go.mod h1:FW86P8YXVLsbuplGMZeb20J3jYHscrDqw4jELaFJvRU= k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655/go.mod h1:nL6pwRT8NgfF8TT68DBI8uEePRt89cSvoXUVqbkWHq4= k8s.io/apimachinery v0.15.7/go.mod h1:Xc10RHc1U+F/e9GCloJ8QAeCGevSVP5xhOhqlE+e1kM= +k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/apimachinery v0.19.1/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= @@ -1574,17 +2121,17 @@ k8s.io/apimachinery v0.20.0/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRp k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= -k8s.io/apimachinery v0.21.2 h1:vezUc/BHqWlQDnZ+XkrpXSmnANSLbpnlpwo0Lhk0gpc= -k8s.io/apimachinery v0.21.2/go.mod h1:CdTY8fU/BlvAbJ2z/8kBwimGki5Zp8/fbVuLY8gJumM= +k8s.io/apimachinery v0.21.3 h1:3Ju4nvjCngxxMYby0BimUk+pQHPOQp3eCGChk5kfVII= +k8s.io/apimachinery v0.21.3/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI= k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg= k8s.io/apiserver v0.15.7/go.mod h1:d5Dbyt588GbBtUnbx9fSK+pYeqgZa32op+I1BmXiNuE= k8s.io/apiserver v0.18.3/go.mod h1:tHQRmthRPLUtwqsOnJJMoI8SW3lnoReZeE861lH8vUw= -k8s.io/apiserver v0.19.2/go.mod h1:FreAq0bJ2vtZFj9Ago/X0oNGC51GfubKK/ViOKfVAOA= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= -k8s.io/apiserver v0.21.1 h1:wTRcid53IhxhbFt4KTrFSw8tAncfr01EP91lzfcygVg= k8s.io/apiserver v0.21.1/go.mod h1:nLLYZvMWn35glJ4/FZRhzLG/3MPxAaZTgV4FJZdr+tY= +k8s.io/apiserver v0.21.3/go.mod h1:eDPWlZG6/cCCMj/JBcEpDoK+I+6i3r9GsChYBHSbAzU= k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90/go.mod h1:J69/JveO6XESwVgG53q3Uz5OSfgsv4uxpScmmyYOOlk= k8s.io/client-go v0.15.7/go.mod h1:QMNB76d3lKPvPQdOOnnxUF693C3hnCzUbC2umg70pWA= +k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU= k8s.io/client-go v0.18.3/go.mod h1:4a/dpQEvzAhT1BbuWW09qvIaGw6Gbu1gZYiQZIi1DMw= k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU= k8s.io/client-go v0.19.1/go.mod h1:AZOIVSI9UUtQPeJD3zJFp15CEhSjRgAuQP5PWRJrCIQ= @@ -1593,28 +2140,24 @@ k8s.io/client-go v0.19.3/go.mod h1:+eEMktZM+MG0KO+PTkci8xnbCZHvj9TqR6Q1XDUIJOM= k8s.io/client-go v0.20.0/go.mod h1:4KWh/g+Ocd8KkCwKF8vUNnmqgv+EVnQDK4MBF4oB5tY= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.21.1/go.mod h1:/kEw4RgW+3xnBGzvp9IWxKSNA+lXn3A7AuH3gdOAzLs= -k8s.io/client-go v0.21.2 
h1:Q1j4L/iMN4pTw6Y4DWppBoUxgKO8LbffEMVEV00MUp0= -k8s.io/client-go v0.21.2/go.mod h1:HdJ9iknWpbl3vMGtib6T2PyI/VYxiZfq936WNVHBRrA= +k8s.io/client-go v0.21.3 h1:J9nxZTOmvkInRDCzcSNQmPJbDYN/PjlxXT9Mos3HcLg= +k8s.io/client-go v0.21.3/go.mod h1:+VPhCgTsaFmGILxR/7E1N0S+ryO010QBeNCv5JwRGYU= k8s.io/cloud-provider v0.21.1 h1:V7ro0ZuxMBNYVH4lJKxCdI+h2bQ7EApC5f7sQYrQLVE= k8s.io/cloud-provider v0.21.1/go.mod h1:GgiRu7hOsZh3+VqMMbfLJJS9ZZM9A8k/YiZG8zkWpX4= k8s.io/code-generator v0.0.0-20190912054826-cd179ad6a269/go.mod h1:V5BD6M4CyaN5m+VthcclXWsVcT1Hu+glwa1bi3MIsyE= k8s.io/code-generator v0.15.7/go.mod h1:G8bQwmHm2eafm5bgtX67XDZQ8CWKSGu9DekI+yN4Y5I= k8s.io/code-generator v0.18.3/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c= k8s.io/code-generator v0.19.0/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= -k8s.io/code-generator v0.19.2/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= k8s.io/code-generator v0.20.0/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg= k8s.io/code-generator v0.20.1/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg= -k8s.io/code-generator v0.21.1/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= +k8s.io/code-generator v0.21.3/go.mod h1:K3y0Bv9Cz2cOW2vXUrNZlFbflhuPvuadW6JdnN6gGKo= k8s.io/component-base v0.0.0-20190918160511-547f6c5d7090/go.mod h1:933PBGtQFJky3TEwYx4aEPZ4IxqhWh3R6DCmzqIn1hA= k8s.io/component-base v0.15.7/go.mod h1:iunfIII6uq3NC3S/EhBpKv8+eQ76vwlOYdFpyIeBk7g= k8s.io/component-base v0.18.3/go.mod h1:bp5GzGR0aGkYEfTj+eTY0AN/vXTgkJdQXjNTTVUaa3k= -k8s.io/component-base v0.19.0/go.mod h1:dKsY8BxkA+9dZIAh2aWJLL/UdASFDNtGYTCItL4LM7Y= -k8s.io/component-base v0.19.2/go.mod h1:g5LrsiTiabMLZ40AR6Hl45f088DevyGY+cCE2agEIVo= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= -k8s.io/component-base v0.21.1 h1:iLpj2btXbR326s/xNQWmPNGu0gaYSjzn7IN/5i28nQw= k8s.io/component-base v0.21.1/go.mod h1:NgzFZ2qu4m1juby4TnrmpR8adRk6ka62YdH5DkIIyKA= -k8s.io/component-helpers v0.21.1 h1:jhi4lHGHOV6mbPqNfITVUoLC3kNFkBQQO1rDDpnThAw= -k8s.io/component-helpers v0.21.1/go.mod h1:FtC1flbiQlosHQrLrRUulnKxE4ajgWCGy/67fT2GRlQ= +k8s.io/component-base v0.21.3 h1:4WuuXY3Npa+iFfi2aDRiOz+anhNvRfye0859ZgfC5Og= +k8s.io/component-base v0.21.3/go.mod h1:kkuhtfEHeZM6LkX0saqSK8PbdO7A0HigUngmhhrwfGQ= k8s.io/controller-manager v0.21.1 h1:IFbukN4M0xl3OHEasNQ91h2MLEAMk3uQrBU4+Edka8w= k8s.io/controller-manager v0.21.1/go.mod h1:8ugs8DCcHqybiwdVERhnnyGoS5Ksq/ea1p2B0CosHyc= k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= @@ -1644,6 +2187,7 @@ k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20190722073852-5e22f3d471e6/go.mod h1:RZvgC8MSN6DjiMV6oIfEE9pDL9CYXokkfaCKZeHm3nc= k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= @@ -1656,32 +2200,29 @@ k8s.io/utils v0.0.0-20190809000727-6c36bc71fc4a/go.mod h1:sZAwmy6armz5eXlNoLmJcl k8s.io/utils 
v0.0.0-20190923111123-69764acb6e8e/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20200912215256-4140de9c8800/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210527160623-6fdb442a123b h1:MSqsVQ3pZvPGTqCjptfimO2WjG7A9un2zcpiHkA6M/s= -k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471 h1:DnzUXII7sVg1FJ/4JX6YDRJfLNAC7idRatPwe07suiI= +k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +layeh.com/radius v0.0.0-20190322222518-890bc1058917 h1:BDXFaFzUt5EIqe/4wrTc4AcYZWP6iC6Ult+jQWLh5eU= layeh.com/radius v0.0.0-20190322222518-890bc1058917/go.mod h1:fywZKyu//X7iRzaxLgPWsvc0L26IUpVvE/aeIL2JtIQ= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= +mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15 h1:4uqm9Mv+w2MmBYD+F4qf/v6tDFUdPOk29C095RbU5mY= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/controller-runtime v0.2.0-beta.2/go.mod h1:TSH2R0nSz4WAlUUlNnOFcOR/VUhfwBLlmtq2X6AiQCA= sigs.k8s.io/controller-runtime v0.2.2/go.mod h1:9dyohw3ZtoXQuV1e766PHUn+cmrRCIcBh6XIMFNMZ+I= -sigs.k8s.io/controller-runtime v0.7.0/go.mod h1:pJ3YBrJiAqMAZKi6UVGuE98ZrroV1p+pIhoHsMm9wdU= -sigs.k8s.io/controller-runtime v0.9.0 h1:ZIZ/dtpboPSbZYY7uUz2OzrkaBTOThx2yekLtpGB+zY= -sigs.k8s.io/controller-runtime v0.9.0/go.mod h1:TgkfvrhhEw3PlI0BRL/5xM+89y3/yc0ZDfdbTl84si8= +sigs.k8s.io/controller-runtime v0.9.6 h1:EevVMlgUj4fC1NVM4+DB3iPkWkmGRNarA66neqv9Qew= +sigs.k8s.io/controller-runtime v0.9.6/go.mod h1:q6PpkM5vqQubEKUKOM6qr06oXGzOBcCby1DA9FbyZeA= sigs.k8s.io/controller-tools v0.2.2-0.20190919191502-76a25b63325a/go.mod h1:8SNGuj163x/sMwydREj7ld5mIMJu1cDanIfnx6xsU70= -sigs.k8s.io/kustomize/kyaml v0.10.17 h1:4zrV0ym5AYa0e512q7K3Wp1u7mzoWW0xR3UHJcGWGIg= -sigs.k8s.io/kustomize/kyaml v0.10.17/go.mod h1:mlQFagmkm1P+W4lZJbJ/yaxMd8PqMRSC4cPcfUVt5Hg= sigs.k8s.io/sig-storage-lib-external-provisioner/v6 v6.1.0 
h1:4kyxBJ/3fzLooWOZkx5NEO/pUN6woM9JBnHuyWzqkc8= sigs.k8s.io/sig-storage-lib-external-provisioner/v6 v6.1.0/go.mod h1:DhZ52sQMJHW21+JXyA2LRUPRIxKnrNrwh+QFV+2tVA4= sigs.k8s.io/structured-merge-diff v0.0.0-20190302045857-e85c7b244fd2/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= @@ -1692,9 +2233,11 @@ sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8= sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/images/Makefile b/images/Makefile index 6dd5e3e33d93..040e9d3f9b21 100644 --- a/images/Makefile +++ b/images/Makefile @@ -29,17 +29,8 @@ cross.%: ceph.%: @$(MAKE) -C ceph PLATFORM=$* -nfs.%: - @$(MAKE) -C nfs PLATFORM=$* - -cassandra.%: - @$(MAKE) -C cassandra PLATFORM=$* - - -do.build.images.%: $(foreach i,$(IMAGES), $(i).%); - -do.build: do.build.images.$(PLATFORM) ; -build.all: $(foreach p,$(PLATFORMS), do.build.images.$(p)) ; ## Build images for all platforms. +do.build: ceph.$(PLATFORM) ; +build.all: $(foreach p,$(PLATFORMS), ceph.$(p)) ; ## Build images for all platforms. # ==================================================================================== # Help diff --git a/images/cassandra/Dockerfile b/images/cassandra/Dockerfile deleted file mode 100644 index a384a5183b0c..000000000000 --- a/images/cassandra/Dockerfile +++ /dev/null @@ -1,39 +0,0 @@ -#Copyright 2018 The Rook Authors. All rights reserved. -# -#Licensed under the Apache License, Version 2.0 (the "License"); -#you may not use this file except in compliance with the License. -#You may obtain a copy of the License at -# -#    http://www.apache.org/licenses/LICENSE-2.0 -# -#Unless required by applicable law or agreed to in writing, software -#distributed under the License is distributed on an "AS IS" BASIS, -#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#See the License for the specific language governing permissions and -#limitations under the License. 
- -FROM alpine:3.12 - -ARG ARCH -ARG TINI_VERSION -ARG JOLOKIA_VERSION=1.6.2 - -ADD rook /usr/local/bin/ - -# Add files for the sidecar -RUN mkdir -p /sidecar -RUN mkdir -p /sidecar/plugins - -ADD rook /sidecar/ -# Jolokia plugin for JMX<->HTTP -ADD "https://search.maven.org/remotecontent?filepath=org/jolokia/jolokia-jvm/${JOLOKIA_VERSION}/jolokia-jvm-${JOLOKIA_VERSION}-agent.jar" /sidecar/plugins/jolokia.jar -# JMX exporter for prometheus metrics -ADD "https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.11.0/jmx_prometheus_javaagent-0.11.0.jar" /sidecar/plugins/jmx_prometheus.jar - -# Run tini as PID 1 and avoid signal handling issues -ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-static-${ARCH} /sidecar/tini -RUN chmod +x /sidecar/tini && chmod +x /usr/local/bin/rook - - -ENTRYPOINT ["/sidecar/tini", "--", "/usr/local/bin/rook"] -CMD [""] diff --git a/images/cassandra/Makefile b/images/cassandra/Makefile deleted file mode 100644 index 72a469633922..000000000000 --- a/images/cassandra/Makefile +++ /dev/null @@ -1,37 +0,0 @@ -#Copyright 2018 The Rook Authors. All rights reserved. -# -#Licensed under the Apache License, Version 2.0 (the "License"); -#you may not use this file except in compliance with the License. -#You may obtain a copy of the License at -# -#    http://www.apache.org/licenses/LICENSE-2.0 -# -#Unless required by applicable law or agreed to in writing, software -#distributed under the License is distributed on an "AS IS" BASIS, -#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#See the License for the specific language governing permissions and -#limitations under the License. - - -include ../image.mk - -# ==================================================================================== -# Image Build Options - -CASSANDRA_OPERATOR_IMAGE = $(BUILD_REGISTRY)/cassandra-$(GOARCH) - -TEMP := $(shell mktemp -d) - -# ==================================================================================== -# Build Rook Cassandra - -do.build: - @echo === container build $(CASSANDRA_OPERATOR_IMAGE) - @cp Dockerfile $(TEMP) - @cp $(OUTPUT_DIR)/bin/linux_$(GOARCH)/rook $(TEMP) - @$(DOCKERCMD) build $(BUILD_ARGS) \ - --build-arg ARCH=$(GOARCH) \ - --build-arg TINI_VERSION=$(TINI_VERSION) \ - -t $(CASSANDRA_OPERATOR_IMAGE) \ - $(TEMP) - @rm -fr $(TEMP) diff --git a/images/ceph/Makefile b/images/ceph/Makefile index 5f00e7609bb8..23ce28f086c9 100755 --- a/images/ceph/Makefile +++ b/images/ceph/Makefile @@ -18,9 +18,9 @@ include ../image.mk # Image Build Options ifeq ($(GOARCH),amd64) -CEPH_VERSION = v16.2.5-20210708 +CEPH_VERSION = v16.2.6-20210918 else -CEPH_VERSION = v16.2.5-20210708 +CEPH_VERSION = v16.2.6-20210918 endif REGISTRY_NAME = quay.io BASEIMAGE = $(REGISTRY_NAME)/ceph/ceph-$(GOARCH):$(CEPH_VERSION) @@ -29,6 +29,7 @@ OPERATOR_SDK_VERSION = v0.17.1 # TODO: update to yq v4 - v3 end of life in Aug 2021 ; v4 removes the 'yq delete' cmd and changes syntax YQ_VERSION = 3.3.0 GOHOST := GOOS=$(GOHOSTOS) GOARCH=$(GOHOSTARCH) go +MANIFESTS_DIR=../../cluster/examples/kubernetes/ceph TEMP := $(shell mktemp -d) @@ -46,6 +47,11 @@ $(info NOT INCLUDING OLM/CSV TEMPLATES!) 
$(info ) endif +VOL_REPL_VERSION = v0.1.0 +VOL_REPL_URL = https://raw.githubusercontent.com/csi-addons/volume-replication-operator/$(VOL_REPL_VERSION)/config/crd/bases +VOLUME_REPLICATION_CRD = replication.storage.openshift.io_volumereplications.yaml +VOLUME_REPLICATION_CLASS_CRD = replication.storage.openshift.io_volumereplicationclasses.yaml + OPERATOR_SDK := $(TOOLS_HOST_DIR)/operator-sdk-$(OPERATOR_SDK_VERSION) YQ := $(TOOLS_HOST_DIR)/yq-$(YQ_VERSION) export OPERATOR_SDK YQ @@ -53,24 +59,27 @@ export OPERATOR_SDK YQ # ==================================================================================== # Build Rook -do.build: generate-csv-ceph-templates +do.build: @echo === container build $(CEPH_IMAGE) @cp Dockerfile $(TEMP) @cp toolbox.sh $(TEMP) @cp set-ceph-debug-level $(TEMP) @cp $(OUTPUT_DIR)/bin/linux_$(GOARCH)/rook $(TEMP) @cp $(OUTPUT_DIR)/bin/linux_$(GOARCH)/rookflex $(TEMP) - @cp -r ../../cluster/examples/kubernetes/ceph/csi/template $(TEMP)/ceph-csi - @cp -r ../../cluster/examples/kubernetes/ceph/monitoring $(TEMP)/ceph-monitoring + @cp -r $(MANIFESTS_DIR)/csi/template $(TEMP)/ceph-csi + @cp -r $(MANIFESTS_DIR)/monitoring $(TEMP)/ceph-monitoring @mkdir -p $(TEMP)/rook-external/test-data - @cp ../../cluster/examples/kubernetes/ceph/create-external-cluster-resources.* $(TEMP)/rook-external/ - @cp ../../cluster/examples/kubernetes/ceph/test-data/ceph-status-out $(TEMP)/rook-external/test-data/ - @if [ ! "$(INCLUDE_CSV_TEMPLATES)" = "" ]; then\ - cp -r ../../cluster/olm/ceph/templates $(TEMP)/ceph-csv-templates;\ - else\ - mkdir $(TEMP)/ceph-csv-templates;\ - fi - @cd $(TEMP) && $(SED_CMD) 's|BASEIMAGE|$(BASEIMAGE)|g' Dockerfile + @cp $(MANIFESTS_DIR)/create-external-cluster-resources.* $(TEMP)/rook-external/ + @cp $(MANIFESTS_DIR)/test-data/ceph-status-out $(TEMP)/rook-external/test-data/ + +ifeq ($(INCLUDE_CSV_TEMPLATES),true) + @$(MAKE) CSV_TEMPLATE_DIR=$(TEMP) generate-csv-templates + @$(MAKE) CRD_TEMPLATE_DIR=$(TEMP)/cluster/olm/ceph/templates/crds/ get-volume-replication-crds + @cp -r $(TEMP)/cluster/olm/ceph/templates $(TEMP)/ceph-csv-templates +else + mkdir $(TEMP)/ceph-csv-templates +endif + @cd $(TEMP) && $(SED_IN_PLACE) 's|BASEIMAGE|$(BASEIMAGE)|g' Dockerfile @if [ -z "$(BUILD_CONTAINER_IMAGE)" ]; then\ $(DOCKERCMD) build $(BUILD_ARGS) \ --build-arg ARCH=$(GOARCH) \ @@ -79,39 +88,50 @@ do.build: generate-csv-ceph-templates $(TEMP);\ fi @rm -fr $(TEMP) - @$(MAKE) -C ../.. crds # revert changes made to the crds.yaml file during the csv-gen sequence - -generate-csv-ceph-templates: $(OPERATOR_SDK) $(YQ) - @if [ ! "$(INCLUDE_CSV_TEMPLATES)" = "" ]; then\ - if [ "$(GOARCH)" = amd64 ]; then\ - BEFORE_GEN_CRD_SIZE=$$(wc -l < ../../cluster/examples/kubernetes/ceph/crds.yaml);\ - $(MAKE) -C ../.. 
NO_OB_OBC_VOL_GEN=true MAX_DESC_LEN=0 crds;\ - AFTER_GEN_CRD_SIZE=$$(wc -l < ../../cluster/examples/kubernetes/ceph/crds.yaml);\ - if [ "$$BEFORE_GEN_CRD_SIZE" -le "$$AFTER_GEN_CRD_SIZE" ]; then\ - echo "the new crd file must be smaller since the description fields were stripped!";\ - echo "length before $$BEFORE_GEN_CRD_SIZE";\ - echo "length after $$AFTER_GEN_CRD_SIZE";\ - exit 1;\ - fi;\ - fi;\ - ../../cluster/olm/ceph/generate-rook-csv-templates.sh;\ + +# generate CSV template files into the directory defined by the env var CSV_TEMPLATE_DIR +# CSV_TEMPLATE_DIR will be created if it doesn't already exist +generate-csv-templates: $(OPERATOR_SDK) $(YQ) ## Generate CSV templates for OLM into CSV_TEMPLATE_DIR + @if [[ -z "$(CSV_TEMPLATE_DIR)" ]]; then echo "CSV_TEMPLATE_DIR is not set"; exit 1; fi + @# first, copy the existing CRDs and OLM catalog directory to CSV_TEMPLATE_DIR + @# then, generate or copy all prerequisites into CSV_TEMPLATE_DIR (e.g., CRDs) + @# finally, generate the templates in-place using CSV_TEMPLATE_DIR as a staging dir + @mkdir -p $(CSV_TEMPLATE_DIR) + @cp -a ../../cluster $(CSV_TEMPLATE_DIR)/cluster + @set -eE;\ + BEFORE_GEN_CRD_SIZE=$$(wc -l < $(MANIFESTS_DIR)/crds.yaml);\ + $(MAKE) -C ../.. NO_OB_OBC_VOL_GEN=true MAX_DESC_LEN=0 BUILD_CRDS_INTO_DIR=$(CSV_TEMPLATE_DIR) crds;\ + AFTER_GEN_CRD_SIZE=$$(wc -l < $(CSV_TEMPLATE_DIR)/cluster/examples/kubernetes/ceph/crds.yaml);\ + if [ "$$BEFORE_GEN_CRD_SIZE" -le "$$AFTER_GEN_CRD_SIZE" ]; then\ + echo "the new crd file must be smaller since the description fields were stripped!";\ + echo "length before $$BEFORE_GEN_CRD_SIZE";\ + echo "length after $$AFTER_GEN_CRD_SIZE";\ + exit 1;\ fi + @OLM_CATALOG_DIR=$(CSV_TEMPLATE_DIR)/cluster/olm/ceph ../../cluster/olm/ceph/generate-rook-csv-templates.sh + @echo " === Generated CSV templates can be found at $(CSV_TEMPLATE_DIR)/cluster/olm/ceph/templates" -$(YQ): - @if [ ! "$(INCLUDE_CSV_TEMPLATES)" = "" ]; then\ - echo === installing yq $(GOHOST);\ - mkdir -p $(TOOLS_HOST_DIR);\ - curl -JL https://github.com/mikefarah/yq/releases/download/$(YQ_VERSION)/yq_$(HOST_PLATFORM) -o $(YQ);\ - chmod +x $(YQ);\ +get-volume-replication-crds: + @if [[ -z "$(CRD_TEMPLATE_DIR)" ]]; then echo "CRD_TEMPLATE_DIR is not set"; exit 1; fi + @if [[ ! -d "$(CACHE_DIR)/crds" ]]; then\ + mkdir -p $(CACHE_DIR)/crds;\ + curl -L $(VOL_REPL_URL)/$(VOLUME_REPLICATION_CRD) -o $(CACHE_DIR)/crds/$(VOLUME_REPLICATION_CRD);\ + curl -L $(VOL_REPL_URL)/$(VOLUME_REPLICATION_CLASS_CRD) -o $(CACHE_DIR)/crds/$(VOLUME_REPLICATION_CLASS_CRD);\ fi + @cp $(CACHE_DIR)/crds/* $(CRD_TEMPLATE_DIR) + +$(YQ): + @echo === installing yq $(GOHOST) + @mkdir -p $(TOOLS_HOST_DIR) + @curl -JL https://github.com/mikefarah/yq/releases/download/$(YQ_VERSION)/yq_$(HOST_PLATFORM) -o $(YQ) + @chmod +x $(YQ) $(OPERATOR_SDK): - @if [ ! 
"$(INCLUDE_CSV_TEMPLATES)" = "" ]; then\ - echo === installing operator-sdk $(GOHOST);\ - mkdir -p $(TOOLS_HOST_DIR);\ - curl -JL https://github.com/operator-framework/operator-sdk/releases/download/$(OPERATOR_SDK_VERSION)/operator-sdk-$(OPERATOR_SDK_VERSION)-$(OPERATOR_SDK_PLATFORM) -o $(TOOLS_HOST_DIR)/operator-sdk-$(OPERATOR_SDK_VERSION);\ - chmod +x $(OPERATOR_SDK);\ - fi + @echo === installing operator-sdk $(GOHOST) + @mkdir -p $(TOOLS_HOST_DIR) + @curl -JL -o $(TOOLS_HOST_DIR)/operator-sdk-$(OPERATOR_SDK_VERSION) \ + https://github.com/operator-framework/operator-sdk/releases/download/$(OPERATOR_SDK_VERSION)/operator-sdk-$(OPERATOR_SDK_VERSION)-$(OPERATOR_SDK_PLATFORM) + @chmod +x $(OPERATOR_SDK) csv: $(OPERATOR_SDK) $(YQ) ## Generate a CSV file for OLM. @echo Generating CSV manifests @@ -119,3 +139,16 @@ csv: $(OPERATOR_SDK) $(YQ) ## Generate a CSV file for OLM. csv-clean: $(OPERATOR_SDK) $(YQ) ## Remove existing OLM files. @rm -fr ../../cluster/olm/ceph/deploy/* ../../cluster/olm/ceph/templates/* + +# reading from a file and outputting to the same file can have undefined results, so use this intermediate +IMAGE_TMP="/tmp/rook-ceph-image-list" +list-image: ## Create a list of images for offline installation + @echo "producing list of images for offline installation" + rm -f $(IMAGE_TMP) + awk '/image:/ {print $2}' $(MANIFESTS_DIR)/operator.yaml $(MANIFESTS_DIR)/cluster.yaml | \ + cut -d: -f2- | tee $(IMAGE_TMP) + awk '/quay.io/ || /k8s.gcr.io/ {print $3}' ../../pkg/operator/ceph/csi/spec.go | \ + cut -d= -f2- | tr -d '"' | tee -a $(IMAGE_TMP) + rm -f $(MANIFESTS_DIR)/images.txt + cat $(IMAGE_TMP) | sort -h | uniq | tee $(MANIFESTS_DIR)/images.txt + rm -f $(IMAGE_TMP) diff --git a/images/cross/Dockerfile b/images/cross/Dockerfile index 1b914b9cbe7a..31c316bf49f1 100644 --- a/images/cross/Dockerfile +++ b/images/cross/Dockerfile @@ -37,8 +37,8 @@ RUN DEBIAN_FRONTEND=noninteractive apt-get update && \ DEBIAN_FRONTEND=noninteractive apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* # install golang from the official repo -RUN GO_VERSION=1.16.3 && \ - GO_HASH=951a3c7c6ce4e56ad883f97d9db74d3d6d80d5fec77455c6ada6c1f7ac4776d2 && \ +RUN GO_VERSION=1.16.7 && \ + GO_HASH=7fe7a73f55ba3e2285da36f8b085e5c0159e9564ef5f63ee0ed6b818ade8ef04 && \ curl -fsSL https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz -o golang.tar.gz && \ echo "${GO_HASH} golang.tar.gz" | sha256sum -c - && \ tar -C /usr/local -xzf golang.tar.gz && \ diff --git a/images/nfs/Dockerfile b/images/nfs/Dockerfile deleted file mode 100644 index d7d13e14f6c8..000000000000 --- a/images/nfs/Dockerfile +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2018 The Rook Authors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -#Portions of this file came from https://github.com/mitcdh/docker-nfs-ganesha/blob/master/Dockerfile, which uses the same license. - -FROM NFS_BASEIMAGE -# Build ganesha from source, installing deps and removing them in one line. -# Why? -# 1. 
Root_Id_Squash, only present in >= 2.4.0.3 which is not yet packaged -# 2. Set NFS_V4_RECOV_ROOT to /export -# 3. Use device major/minor as fsid major/minor to work on OverlayFS - -RUN DEBIAN_FRONTEND=noninteractive \ - && apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 3FE869A9 \ - && echo "deb http://ppa.launchpad.net/gluster/nfs-ganesha-2.7/ubuntu xenial main" > /etc/apt/sources.list.d/nfs-ganesha-2.5.list \ - && echo "deb http://ppa.launchpad.net/gluster/libntirpc-1.7/ubuntu xenial main" > /etc/apt/sources.list.d/libntirpc-1.5.list \ - && echo "deb http://ppa.launchpad.net/gluster/glusterfs-5/ubuntu xenial main" > /etc/apt/sources.list.d/glusterfs-3.13.list \ - && apt-get update \ - && apt-get install -y netbase nfs-common dbus nfs-ganesha nfs-ganesha-vfs glusterfs-common xfsprogs \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \ - && mkdir -p /run/rpcbind /export /var/run/dbus \ - && touch /run/rpcbind/rpcbind.xdr /run/rpcbind/portmap.xdr \ - && chmod 755 /run/rpcbind/* \ - && chown messagebus:messagebus /var/run/dbus - -EXPOSE 2049 38465-38467 662 111/udp 111 - -COPY rook /usr/local/bin/ - -ENTRYPOINT ["/usr/local/bin/rook"] -CMD [""] diff --git a/images/nfs/Makefile b/images/nfs/Makefile deleted file mode 100755 index 88847221aa2a..000000000000 --- a/images/nfs/Makefile +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2018 The Rook Authors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -include ../image.mk - -# ==================================================================================== -# Image Build Options - -NFS_IMAGE = $(BUILD_REGISTRY)/nfs-$(GOARCH) - -NFS_BASE ?= ubuntu:xenial - -ifeq ($(GOARCH),amd64) -NFS_BASEIMAGE = $(NFS_BASE) -else ifeq ($(GOARCH),arm64) -NFS_BASEIMAGE = arm64v8/$(NFS_BASE) -endif - -TEMP := $(shell mktemp -d) - -# ==================================================================================== -# Build Rook NFS - -# since this is a leaf image we avoid leaving around a lot of dangling images -# by removing the last build of the final nfs image -OLD_IMAGE_ID := $(shell $(DOCKERCMD) images -q $(NFS_IMAGE)) -CURRENT_IMAGE_ID := $$($(DOCKERCMD) images -q $(NFS_IMAGE)) -IMAGE_FILENAME := $(IMAGE_OUTPUT_DIR)/nfs.tar.gz - -do.build: - @echo === container build $(NFS_IMAGE) - @cp Dockerfile $(TEMP) - @cp $(OUTPUT_DIR)/bin/linux_$(GOARCH)/rook $(TEMP) - @cd $(TEMP) && $(SED_CMD) 's|NFS_BASEIMAGE|$(NFS_BASEIMAGE)|g' Dockerfile - @$(DOCKERCMD) build $(BUILD_ARGS) \ - -t $(NFS_IMAGE) \ - $(TEMP) - @[ "$(OLD_IMAGE_ID)" != "$(CURRENT_IMAGE_ID)" ] && [ -n "$(OLD_IMAGE_ID)" ] && $(DOCKERCMD) rmi $(OLD_IMAGE_ID) || true - @if [ ! 
-e "$(IMAGE_FILENAME)" ] || [ "$(OLD_IMAGE_ID)" != "$(CURRENT_IMAGE_ID)" ] || [ -n "$(OLD_IMAGE_ID)" ]; then \ - echo === saving image $(NFS_IMAGE); \ - mkdir -p $(IMAGE_OUTPUT_DIR); \ - $(DOCKERCMD) save $(NFS_IMAGE) | gzip -c > $(IMAGE_FILENAME); \ - fi - @rm -fr $(TEMP) diff --git a/pkg/apis/cassandra.rook.io/register.go b/pkg/apis/cassandra.rook.io/register.go deleted file mode 100644 index 6d454b1bc04e..000000000000 --- a/pkg/apis/cassandra.rook.io/register.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cassandrarookio - -const ( - // CustomResourceGroupName for the cassandra operator's CRDs - CustomResourceGroupName = "cassandra.rook.io" -) diff --git a/pkg/apis/cassandra.rook.io/v1alpha1/doc.go b/pkg/apis/cassandra.rook.io/v1alpha1/doc.go deleted file mode 100644 index c4e4e602adf9..000000000000 --- a/pkg/apis/cassandra.rook.io/v1alpha1/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package,register - -// Package v1alpha1 is the v1alpha1 version of the API. -// +groupName=cassandra.rook.io -package v1alpha1 diff --git a/pkg/apis/cassandra.rook.io/v1alpha1/register.go b/pkg/apis/cassandra.rook.io/v1alpha1/register.go deleted file mode 100644 index aa04a8c97931..000000000000 --- a/pkg/apis/cassandra.rook.io/v1alpha1/register.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "github.com/rook/rook/pkg/apis/cassandra.rook.io" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -const ( - CustomResourceGroup = "cassandra.rook.io" - Version = "v1alpha1" -) - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: cassandrarookio.CustomResourceGroupName, Version: Version} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // SchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder runtime.SchemeBuilder - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addKnownTypes) -} - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &Cluster{}, - &ClusterList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/pkg/apis/cassandra.rook.io/v1alpha1/types.go b/pkg/apis/cassandra.rook.io/v1alpha1/types.go deleted file mode 100644 index 99881d30ffd5..000000000000 --- a/pkg/apis/cassandra.rook.io/v1alpha1/types.go +++ /dev/null @@ -1,212 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "github.com/rook/rook/pkg/apis/rook.io" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - APIVersion = CustomResourceGroup + "/" + Version - - // These are valid condition statuses. "ConditionTrue" means a resource is in the condition; - // "ConditionFalse" means a resource is not in the condition; "ConditionUnknown" means kubernetes - // can't decide if a resource is in the condition or not. - ConditionTrue ConditionStatus = "True" - ConditionFalse ConditionStatus = "False" - ConditionUnknown ConditionStatus = "Unknown" -) - -// *************************************************************************** -// IMPORTANT FOR CODE GENERATION -// If the types in this file are updated, you will need to run -// `make codegen` to generate the new types under the client/clientset folder. -// *************************************************************************** - -// Kubernetes API Conventions: -// https://github.com/kubernetes/community/blob/af5c40530f50c3b36c13438187b311102093ede5/contributors/devel/api-conventions.md -// Applicable Here: -// * Optional fields use a pointer to correctly handle empty values. 
- -// +genclient -// +genclient:noStatus -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type Cluster struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - Spec ClusterSpec `json:"spec"` - // +optional - // +nullable - Status ClusterStatus `json:"status,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type ClusterList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - Items []Cluster `json:"items"` -} - -// ClusterSpec is the desired state for a Cassandra Cluster. -type ClusterSpec struct { - // The annotations-related configuration to add/set on each Pod related object. - // +optional - // +nullable - Annotations rook.Annotations `json:"annotations,omitempty"` - // Version of Cassandra to use. - Version string `json:"version"` - // Repository to pull the image from. - // +optional - // +nullable - Repository *string `json:"repository,omitempty"` - // Mode selects an operating mode. - // +optional - Mode ClusterMode `json:"mode,omitempty"` - // Datacenter that will make up this cluster. - // +optional - // +nullable - Datacenter DatacenterSpec `json:"datacenter,omitempty"` - // User-provided image for the sidecar that replaces default. - // +optional - // +nullable - SidecarImage *ImageSpec `json:"sidecarImage,omitempty"` -} - -type ClusterMode string - -const ( - ClusterModeCassandra ClusterMode = "cassandra" - ClusterModeScylla ClusterMode = "scylla" -) - -// DatacenterSpec is the desired state for a Cassandra Datacenter. -type DatacenterSpec struct { - // Name of the Cassandra Datacenter. Used in the cassandra-rackdc.properties file. - Name string `json:"name"` - // Racks of the specific Datacenter. - Racks []RackSpec `json:"racks"` -} - -// RackSpec is the desired state for a Cassandra Rack. -type RackSpec struct { - // Name of the Cassandra Rack. Used in the cassandra-rackdc.properties file. - Name string `json:"name"` - // Members is the number of Cassandra instances in this rack. - Members int32 `json:"members"` - // User-provided ConfigMap applied to the specific statefulset. - // +optional - // +nullable - ConfigMapName *string `json:"configMapName,omitempty"` - // User-provided ConfigMap for jmx prometheus exporter - // +optional - // +nullable - JMXExporterConfigMapName *string `json:"jmxExporterConfigMapName,omitempty"` - // Storage describes the underlying storage that Cassandra will consume. - Storage StorageScopeSpec `json:"storage,omitempty"` - // The annotations-related configuration to add/set on each Pod related object. - // +optional - // +nullable - Annotations map[string]string `json:"annotations,omitempty"` - // Placement describes restrictions for the nodes Cassandra is scheduled on. - // +optional - // +nullable - Placement *Placement `json:"placement,omitempty"` - // Resources the Cassandra Pods will use. - // +optional - // +nullable - Resources corev1.ResourceRequirements `json:"resources,omitempty"` -} - -// ImageSpec is the desired state for a container image. -type ImageSpec struct { - // Version of the image. - Version string `json:"version"` - // Repository to pull the image from. 
- // +optional - Repository string `json:"repository,omitempty"` -} - -// ClusterStatus is the status of a Cassandra Cluster -type ClusterStatus struct { - Racks map[string]*RackStatus `json:"racks,omitempty"` -} - -// RackStatus is the status of a Cassandra Rack -type RackStatus struct { - // Members is the current number of members requested in the specific Rack - Members int32 `json:"members"` - // ReadyMembers is the number of ready members in the specific Rack - ReadyMembers int32 `json:"readyMembers"` - // Conditions are the latest available observations of a rack's state. - Conditions []RackCondition `json:"conditions,omitempty"` -} - -// RackCondition is an observation about the state of a rack. -type RackCondition struct { - Type RackConditionType `json:"type"` - Status ConditionStatus `json:"status"` -} - -type RackConditionType string - -const ( - RackConditionTypeMemberLeaving RackConditionType = "MemberLeaving" -) - -type ConditionStatus string - -type StorageScopeSpec struct { - // +nullable - // +optional - Nodes []Node `json:"nodes,omitempty"` - - // PersistentVolumeClaims to use as storage - // +optional - VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty"` -} - -// Node is a storage nodes -// +nullable -type Node struct { - // +optional - Name string `json:"name,omitempty"` -} - -// Placement is the placement for an object -type Placement struct { - // NodeAffinity is a group of node affinity scheduling rules - // +optional - NodeAffinity *v1.NodeAffinity `json:"nodeAffinity,omitempty"` - // PodAffinity is a group of inter pod affinity scheduling rules - // +optional - PodAffinity *v1.PodAffinity `json:"podAffinity,omitempty"` - // PodAntiAffinity is a group of inter pod anti affinity scheduling rules - // +optional - PodAntiAffinity *v1.PodAntiAffinity `json:"podAntiAffinity,omitempty"` - // The pod this Toleration is attached to tolerates any taint that matches - // the triple using the matching operator - // +optional - Tolerations []v1.Toleration `json:"tolerations,omitempty"` - // TopologySpreadConstraint specifies how to spread matching pods among the given topology - // +optional - TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` -} diff --git a/pkg/apis/cassandra.rook.io/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/cassandra.rook.io/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index c1b46e0bb5cc..000000000000 --- a/pkg/apis/cassandra.rook.io/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,358 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - rookio "github.com/rook/rook/pkg/apis/rook.io" - v1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Cluster) DeepCopyInto(out *Cluster) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. -func (in *Cluster) DeepCopy() *Cluster { - if in == nil { - return nil - } - out := new(Cluster) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Cluster) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterList) DeepCopyInto(out *ClusterList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Cluster, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList. -func (in *ClusterList) DeepCopy() *ClusterList { - if in == nil { - return nil - } - out := new(ClusterList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { - *out = *in - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(rookio.Annotations, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Repository != nil { - in, out := &in.Repository, &out.Repository - *out = new(string) - **out = **in - } - in.Datacenter.DeepCopyInto(&out.Datacenter) - if in.SidecarImage != nil { - in, out := &in.SidecarImage, &out.SidecarImage - *out = new(ImageSpec) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. -func (in *ClusterSpec) DeepCopy() *ClusterSpec { - if in == nil { - return nil - } - out := new(ClusterSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { - *out = *in - if in.Racks != nil { - in, out := &in.Racks, &out.Racks - *out = make(map[string]*RackStatus, len(*in)) - for key, val := range *in { - var outVal *RackStatus - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = new(RackStatus) - (*in).DeepCopyInto(*out) - } - (*out)[key] = outVal - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. -func (in *ClusterStatus) DeepCopy() *ClusterStatus { - if in == nil { - return nil - } - out := new(ClusterStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DatacenterSpec) DeepCopyInto(out *DatacenterSpec) { - *out = *in - if in.Racks != nil { - in, out := &in.Racks, &out.Racks - *out = make([]RackSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatacenterSpec. -func (in *DatacenterSpec) DeepCopy() *DatacenterSpec { - if in == nil { - return nil - } - out := new(DatacenterSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageSpec) DeepCopyInto(out *ImageSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSpec. -func (in *ImageSpec) DeepCopy() *ImageSpec { - if in == nil { - return nil - } - out := new(ImageSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Node) DeepCopyInto(out *Node) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node. -func (in *Node) DeepCopy() *Node { - if in == nil { - return nil - } - out := new(Node) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Placement) DeepCopyInto(out *Placement) { - *out = *in - if in.NodeAffinity != nil { - in, out := &in.NodeAffinity, &out.NodeAffinity - *out = new(v1.NodeAffinity) - (*in).DeepCopyInto(*out) - } - if in.PodAffinity != nil { - in, out := &in.PodAffinity, &out.PodAffinity - *out = new(v1.PodAffinity) - (*in).DeepCopyInto(*out) - } - if in.PodAntiAffinity != nil { - in, out := &in.PodAntiAffinity, &out.PodAntiAffinity - *out = new(v1.PodAntiAffinity) - (*in).DeepCopyInto(*out) - } - if in.Tolerations != nil { - in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.TopologySpreadConstraints != nil { - in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints - *out = make([]v1.TopologySpreadConstraint, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Placement. -func (in *Placement) DeepCopy() *Placement { - if in == nil { - return nil - } - out := new(Placement) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RackCondition) DeepCopyInto(out *RackCondition) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RackCondition. -func (in *RackCondition) DeepCopy() *RackCondition { - if in == nil { - return nil - } - out := new(RackCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RackSpec) DeepCopyInto(out *RackSpec) { - *out = *in - if in.ConfigMapName != nil { - in, out := &in.ConfigMapName, &out.ConfigMapName - *out = new(string) - **out = **in - } - if in.JMXExporterConfigMapName != nil { - in, out := &in.JMXExporterConfigMapName, &out.JMXExporterConfigMapName - *out = new(string) - **out = **in - } - in.Storage.DeepCopyInto(&out.Storage) - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Placement != nil { - in, out := &in.Placement, &out.Placement - *out = new(Placement) - (*in).DeepCopyInto(*out) - } - in.Resources.DeepCopyInto(&out.Resources) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RackSpec. -func (in *RackSpec) DeepCopy() *RackSpec { - if in == nil { - return nil - } - out := new(RackSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RackStatus) DeepCopyInto(out *RackStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]RackCondition, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RackStatus. -func (in *RackStatus) DeepCopy() *RackStatus { - if in == nil { - return nil - } - out := new(RackStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StorageScopeSpec) DeepCopyInto(out *StorageScopeSpec) { - *out = *in - if in.Nodes != nil { - in, out := &in.Nodes, &out.Nodes - *out = make([]Node, len(*in)) - copy(*out, *in) - } - if in.VolumeClaimTemplates != nil { - in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates - *out = make([]v1.PersistentVolumeClaim, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageScopeSpec. 
-func (in *StorageScopeSpec) DeepCopy() *StorageScopeSpec { - if in == nil { - return nil - } - out := new(StorageScopeSpec) - in.DeepCopyInto(out) - return out -} diff --git a/pkg/apis/ceph.rook.io/v1/cluster.go b/pkg/apis/ceph.rook.io/v1/cluster.go index a5c7af4a20fa..a1d8ad48ff82 100644 --- a/pkg/apis/ceph.rook.io/v1/cluster.go +++ b/pkg/apis/ceph.rook.io/v1/cluster.go @@ -55,10 +55,6 @@ func (c *CephCluster) ValidateDelete() error { } func validateUpdatedCephCluster(updatedCephCluster *CephCluster, found *CephCluster) error { - if updatedCephCluster.Spec.Mon.Count > 0 && updatedCephCluster.Spec.Mon.Count%2 == 0 { - return errors.Errorf("mon count %d cannot be even, must be odd to support a healthy quorum", updatedCephCluster.Spec.Mon.Count) - } - if updatedCephCluster.Spec.DataDirHostPath != found.Spec.DataDirHostPath { return errors.Errorf("invalid update: DataDirHostPath change from %q to %q is not allowed", found.Spec.DataDirHostPath, updatedCephCluster.Spec.DataDirHostPath) } diff --git a/pkg/apis/ceph.rook.io/v1/cluster_test.go b/pkg/apis/ceph.rook.io/v1/cluster_test.go index 6fffb9ff9616..dbc169f54033 100644 --- a/pkg/apis/ceph.rook.io/v1/cluster_test.go +++ b/pkg/apis/ceph.rook.io/v1/cluster_test.go @@ -35,7 +35,7 @@ func Test_validateUpdatedCephCluster(t *testing.T) { }{ {"everything is ok", args{&CephCluster{}, &CephCluster{}}, false}, {"good mon count", args{&CephCluster{Spec: ClusterSpec{Mon: MonSpec{Count: 1}}}, &CephCluster{}}, false}, - {"even mon count", args{&CephCluster{Spec: ClusterSpec{Mon: MonSpec{Count: 2}}}, &CephCluster{}}, true}, + {"even mon count", args{&CephCluster{Spec: ClusterSpec{Mon: MonSpec{Count: 2}}}, &CephCluster{}}, false}, {"good mon count", args{&CephCluster{Spec: ClusterSpec{Mon: MonSpec{Count: 3}}}, &CephCluster{}}, false}, {"changed DataDirHostPath", args{&CephCluster{Spec: ClusterSpec{DataDirHostPath: "foo"}}, &CephCluster{Spec: ClusterSpec{DataDirHostPath: "bar"}}}, true}, {"changed HostNetwork", args{&CephCluster{Spec: ClusterSpec{Network: NetworkSpec{HostNetwork: false}}}, &CephCluster{Spec: ClusterSpec{Network: NetworkSpec{HostNetwork: true}}}}, true}, diff --git a/pkg/apis/ceph.rook.io/v1/keys.go b/pkg/apis/ceph.rook.io/v1/keys.go index 9f2fc2a53fad..c76bf01db847 100644 --- a/pkg/apis/ceph.rook.io/v1/keys.go +++ b/pkg/apis/ceph.rook.io/v1/keys.go @@ -21,13 +21,14 @@ import ( ) const ( - KeyAll = "all" - KeyMds rookcore.KeyType = "mds" - KeyMon rookcore.KeyType = "mon" - KeyMonArbiter rookcore.KeyType = "arbiter" - KeyMgr rookcore.KeyType = "mgr" - KeyOSDPrepare rookcore.KeyType = "prepareosd" - KeyOSD rookcore.KeyType = "osd" - KeyCleanup rookcore.KeyType = "cleanup" - KeyMonitoring rookcore.KeyType = "monitoring" + KeyAll = "all" + KeyMds rookcore.KeyType = "mds" + KeyMon rookcore.KeyType = "mon" + KeyMonArbiter rookcore.KeyType = "arbiter" + KeyMgr rookcore.KeyType = "mgr" + KeyOSDPrepare rookcore.KeyType = "prepareosd" + KeyOSD rookcore.KeyType = "osd" + KeyCleanup rookcore.KeyType = "cleanup" + KeyMonitoring rookcore.KeyType = "monitoring" + KeyCrashCollector rookcore.KeyType = "crashcollector" ) diff --git a/pkg/apis/ceph.rook.io/v1/labels.go b/pkg/apis/ceph.rook.io/v1/labels.go index aed8af9ae4ca..c12fdf45986f 100644 --- a/pkg/apis/ceph.rook.io/v1/labels.go +++ b/pkg/apis/ceph.rook.io/v1/labels.go @@ -57,6 +57,11 @@ func GetMonitoringLabels(a LabelsSpec) rook.Labels { return mergeAllLabelsWithKey(a, KeyMonitoring) } +// GetCrashCollectorLabels returns the Labels for the crash collector resources +func GetCrashCollectorLabels(a 
LabelsSpec) rook.Labels { + return mergeAllLabelsWithKey(a, KeyCrashCollector) +} + func mergeAllLabelsWithKey(a LabelsSpec, name rook.KeyType) rook.Labels { all := a.All() if all != nil { diff --git a/pkg/apis/ceph.rook.io/v1/placement.go b/pkg/apis/ceph.rook.io/v1/placement.go index 04bdafd2532f..5bbd74d9e5ad 100644 --- a/pkg/apis/ceph.rook.io/v1/placement.go +++ b/pkg/apis/ceph.rook.io/v1/placement.go @@ -15,7 +15,9 @@ limitations under the License. */ package v1 -import v1 "k8s.io/api/core/v1" +import ( + v1 "k8s.io/api/core/v1" +) func (p PlacementSpec) All() Placement { return p[KeyAll] @@ -36,7 +38,7 @@ func (p Placement) ApplyToPodSpec(t *v1.PodSpec) { t.Affinity.PodAntiAffinity = p.PodAntiAffinity.DeepCopy() } if p.Tolerations != nil { - t.Tolerations = p.Tolerations + t.Tolerations = p.mergeTolerations(t.Tolerations) } if p.TopologySpreadConstraints != nil { t.TopologySpreadConstraints = p.TopologySpreadConstraints @@ -90,6 +92,15 @@ func (p Placement) mergeNodeAffinity(nodeAffinity *v1.NodeAffinity) *v1.NodeAffi return result } +func (p Placement) mergeTolerations(tolerations []v1.Toleration) []v1.Toleration { + // no toleration is specified yet, return placement's toleration + if tolerations == nil { + return p.Tolerations + } + + return append(p.Tolerations, tolerations...) +} + // Merge returns a Placement which results from merging the attributes of the // original Placement with the attributes of the supplied one. The supplied // Placement's attributes will override the original ones if defined. @@ -105,7 +116,7 @@ func (p Placement) Merge(with Placement) Placement { ret.PodAntiAffinity = with.PodAntiAffinity } if with.Tolerations != nil { - ret.Tolerations = with.Tolerations + ret.Tolerations = ret.mergeTolerations(with.Tolerations) } if with.TopologySpreadConstraints != nil { ret.TopologySpreadConstraints = with.TopologySpreadConstraints diff --git a/pkg/apis/ceph.rook.io/v1/placement_test.go b/pkg/apis/ceph.rook.io/v1/placement_test.go index 9c7d4b2a3a40..092e45eeb982 100644 --- a/pkg/apis/ceph.rook.io/v1/placement_test.go +++ b/pkg/apis/ceph.rook.io/v1/placement_test.go @@ -165,7 +165,6 @@ func TestPlacementApplyToPodSpec(t *testing.T) { TopologySpreadConstraints: tc, } ps = &v1.PodSpec{ - Tolerations: placementTestGetTolerations("bar", "baz"), TopologySpreadConstraints: placementTestGetTopologySpreadConstraints("rack"), } p.ApplyToPodSpec(ps) @@ -182,6 +181,17 @@ func TestPlacementApplyToPodSpec(t *testing.T) { } p.ApplyToPodSpec(ps) assert.Equal(t, 2, len(ps.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution)) + + p = Placement{NodeAffinity: na, PodAntiAffinity: antiaffinity} + to = placementTestGetTolerations("foo", "bar") + ps = &v1.PodSpec{ + Tolerations: to, + } + p.ApplyToPodSpec(ps) + assert.Equal(t, 1, len(ps.Tolerations)) + p = Placement{Tolerations: to, NodeAffinity: na, PodAntiAffinity: antiaffinity} + p.ApplyToPodSpec(ps) + assert.Equal(t, 2, len(ps.Tolerations)) } func TestPlacementMerge(t *testing.T) { @@ -218,9 +228,25 @@ func TestPlacementMerge(t *testing.T) { Tolerations: to, TopologySpreadConstraints: tc, } + var ts int64 = 10 expected = Placement{ - NodeAffinity: na, - Tolerations: to, + NodeAffinity: na, + Tolerations: []v1.Toleration{ + { + Key: "bar", + Operator: v1.TolerationOpExists, + Value: "baz", + Effect: v1.TaintEffectNoSchedule, + TolerationSeconds: &ts, + }, + { + Key: "foo", + Operator: v1.TolerationOpExists, + Value: "bar", + Effect: v1.TaintEffectNoSchedule, + TolerationSeconds: &ts, + }, + }, 
TopologySpreadConstraints: tc, } merged = original.Merge(with) @@ -302,3 +328,33 @@ func placementTestGenerateNodeAffinity() *v1.NodeAffinity { }, } } + +func TestMergeToleration(t *testing.T) { + // placement is nil + p := Placement{} + result := p.mergeTolerations(nil) + assert.Nil(t, result) + + placementToleration := []v1.Toleration{ + { + Key: "foo", + Operator: v1.TolerationOpEqual, + }, + } + + p.Tolerations = placementToleration + result = p.mergeTolerations(nil) + assert.Equal(t, p.Tolerations, result) + + newToleration := []v1.Toleration{ + { + Key: "new", + Operator: v1.TolerationOpExists, + }, + } + + result = p.mergeTolerations(newToleration) + assert.Equal(t, 2, len(result)) + assert.Equal(t, placementToleration[0].Key, result[0].Key) + assert.Equal(t, newToleration[0].Key, result[1].Key) +} diff --git a/pkg/apis/ceph.rook.io/v1/types.go b/pkg/apis/ceph.rook.io/v1/types.go index f80b144dd66e..b5f5fc2a6e26 100755 --- a/pkg/apis/ceph.rook.io/v1/types.go +++ b/pkg/apis/ceph.rook.io/v1/types.go @@ -21,6 +21,7 @@ import ( rook "github.com/rook/rook/pkg/apis/rook.io" v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -467,6 +468,7 @@ const ( type MonSpec struct { // Count is the number of Ceph monitors // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=9 // +optional Count int `json:"count,omitempty"` // AllowMultiplePerNode determines if we can run multiple monitors on the same node (not recommended) @@ -600,9 +602,10 @@ type PoolSpec struct { // +nullable DeviceClass string `json:"deviceClass,omitempty"` + // DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" // The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) // +kubebuilder:validation:Enum=none;passive;aggressive;force;"" - // +kubebuilder:default=none + // Do NOT set a default value for kubebuilder as this will override the Parameters // +optional // +nullable CompressionMode string `json:"compressionMode,omitempty"` @@ -873,6 +876,11 @@ type MirroringSpec struct { // SnapshotSchedules is the scheduling of snapshot for mirrored images/pools // +optional SnapshotSchedules []SnapshotScheduleSpec `json:"snapshotSchedules,omitempty"` + + // Peers represents the peers spec + // +nullable + // +optional + Peers *MirroringPeerSpec `json:"peers,omitempty"` } // SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool @@ -909,14 +917,15 @@ type QuotaSpec struct { // ErasureCodedSpec represents the spec for erasure code in a pool type ErasureCodedSpec struct { - // Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) + // Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). + // This is the number of OSDs that can be lost simultaneously before data cannot be recovered. // +kubebuilder:validation:Minimum=0 - // +kubebuilder:validation:Maximum=9 CodingChunks uint `json:"codingChunks"` - // Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) + // Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). + // The number of chunks required to recover an object when any single OSD is lost is the same + // as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery. 
// +kubebuilder:validation:Minimum=0 - // +kubebuilder:validation:Maximum=9 DataChunks uint `json:"dataChunks"` // The algorithm for erasure coding @@ -1278,6 +1287,8 @@ type BucketHealthCheckSpec struct { Bucket HealthCheckSpec `json:"bucket,omitempty"` // +optional LivenessProbe *ProbeSpec `json:"livenessProbe,omitempty"` + // +optional + ReadinessProbe *ProbeSpec `json:"readinessProbe,omitempty"` } // HealthCheckSpec represents the health check of an object store bucket @@ -1314,6 +1325,11 @@ type GatewaySpec struct { // +optional SSLCertificateRef string `json:"sslCertificateRef,omitempty"` + // The name of the secret that stores custom ca-bundle with root and intermediate certificates. + // +nullable + // +optional + CaBundleRef string `json:"caBundleRef,omitempty"` + // The affinity to place the rgw pods (default is to place on any available node) // +kubebuilder:pruning:PreserveUnknownFields // +nullable @@ -1420,12 +1436,59 @@ type CephObjectStoreUserList struct { // ObjectStoreUserSpec represent the spec of an Objectstoreuser type ObjectStoreUserSpec struct { - //The store the user will be created in + // The store the user will be created in // +optional Store string `json:"store,omitempty"` - //The display name for the ceph users + // The display name for the ceph users // +optional DisplayName string `json:"displayName,omitempty"` + // +optional + // +nullable + Capabilities *ObjectUserCapSpec `json:"capabilities,omitempty"` + // +optional + // +nullable + Quotas *ObjectUserQuotaSpec `json:"quotas,omitempty"` +} + +// Additional admin-level capabilities for the Ceph object store user +type ObjectUserCapSpec struct { + // +optional + // +kubebuilder:validation:Enum={"*","read","write","read, write"} + // Admin capabilities to read/write Ceph object store users. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + User string `json:"user,omitempty"` + // +optional + // +kubebuilder:validation:Enum={"*","read","write","read, write"} + // Admin capabilities to read/write Ceph object store buckets. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + Bucket string `json:"bucket,omitempty"` + // +optional + // +kubebuilder:validation:Enum={"*","read","write","read, write"} + // Admin capabilities to read/write Ceph object store metadata. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + MetaData string `json:"metadata,omitempty"` + // +optional + // +kubebuilder:validation:Enum={"*","read","write","read, write"} + // Admin capabilities to read/write Ceph object store usage. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + Usage string `json:"usage,omitempty"` + // +optional + // +kubebuilder:validation:Enum={"*","read","write","read, write"} + // Admin capabilities to read/write Ceph object store zones. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + Zone string `json:"zone,omitempty"` +} + +// ObjectUserQuotaSpec can be used to set quotas for the object store user to limit their usage. 
See the [Ceph docs](https://docs.ceph.com/en/latest/radosgw/admin/?#quota-management) for more information. +type ObjectUserQuotaSpec struct { + // Maximum bucket limit for the Ceph user + // +optional + // +nullable + MaxBuckets *int `json:"maxBuckets,omitempty"` + // Maximum size limit of all objects across all the user's buckets + // See https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity for more info. + // +optional + // +nullable + MaxSize *resource.Quantity `json:"maxSize,omitempty"` + // Maximum number of objects across all the user's buckets + // +optional + // +nullable + MaxObjects *int64 `json:"maxObjects,omitempty"` } // CephObjectRealm represents a Ceph Object Store Gateway Realm @@ -1560,7 +1623,9 @@ type CephNFSList struct { // NFSGaneshaSpec represents the spec of an nfs ganesha server type NFSGaneshaSpec struct { // RADOS is the Ganesha RADOS specification - RADOS GaneshaRADOSSpec `json:"rados"` + // +nullable + // +optional + RADOS GaneshaRADOSSpec `json:"rados,omitempty"` // Server is the Ganesha Server specification Server GaneshaServerSpec `json:"server"` @@ -1568,8 +1633,16 @@ type NFSGaneshaSpec struct { // GaneshaRADOSSpec represents the specification of a Ganesha RADOS object type GaneshaRADOSSpec struct { - // Pool is the RADOS pool where NFS client recovery data is stored. - Pool string `json:"pool"` + // Pool is used to set Ganesha's pool name in Ceph versions older than 16.2.7. + // As of Ceph Pacific 16.2.7, NFS Ganesha's pool name is hardcoded to ".nfs", so this + // setting will be ignored. + // +optional + Pool string `json:"pool,omitempty"` + + // PoolConfig is the RADOS pool where Ganesha data is stored. + // +nullable + // +optional + PoolConfig *PoolSpec `json:"poolConfig,omitempty"` + // Namespace is the RADOS namespace where NFS client recovery data is stored.
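// NOTE (editor's illustrative sketch, not part of the patch itself): a minimal example of the new
// Capabilities and Quotas fields on ObjectStoreUserSpec defined above. The store name, display name,
// and limits are placeholders; MaxSize uses resource.MustParse from
// k8s.io/apimachinery/pkg/api/resource.
maxBuckets := 10
maxObjects := int64(10000)
maxSize := resource.MustParse("10Gi")
user := cephv1.ObjectStoreUserSpec{
	Store:       "my-store", // placeholder CephObjectStore name
	DisplayName: "example display name",
	Capabilities: &cephv1.ObjectUserCapSpec{
		User:   "read",        // may read object store users
		Bucket: "read, write", // may read and write object store buckets
	},
	Quotas: &cephv1.ObjectUserQuotaSpec{
		MaxBuckets: &maxBuckets,
		MaxSize:    &maxSize,
		MaxObjects: &maxObjects,
	},
}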
Namespace string `json:"namespace"` @@ -1633,7 +1706,6 @@ type NetworkSpec struct { // IPFamily is the single stack IPv6 or IPv4 protocol // +kubebuilder:validation:Enum=IPv4;IPv6 - // +kubebuilder:default=IPv4 // +nullable // +optional IPFamily IPFamilyType `json:"ipFamily,omitempty"` @@ -1890,6 +1962,8 @@ type StorageScopeSpec struct { Nodes []Node `json:"nodes,omitempty"` // +optional UseAllNodes bool `json:"useAllNodes,omitempty"` + // +optional + OnlyApplyOSDPlacement bool `json:"onlyApplyOSDPlacement,omitempty"` // +kubebuilder:pruning:PreserveUnknownFields // +nullable // +optional @@ -1923,6 +1997,7 @@ type Device struct { // +optional FullPath string `json:"fullpath,omitempty"` // +kubebuilder:pruning:PreserveUnknownFields + // +nullable // +optional Config map[string]string `json:"config,omitempty"` } diff --git a/pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go b/pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go index b3030b49ec38..714046a6398d 100644 --- a/pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go +++ b/pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go @@ -68,6 +68,11 @@ func (in *BucketHealthCheckSpec) DeepCopyInto(out *BucketHealthCheckSpec) { *out = new(ProbeSpec) (*in).DeepCopyInto(*out) } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + *out = new(ProbeSpec) + (*in).DeepCopyInto(*out) + } return } @@ -848,7 +853,7 @@ func (in *CephObjectStoreUser) DeepCopyInto(out *CephObjectStoreUser) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) if in.Status != nil { in, out := &in.Status, &out.Status *out = new(ObjectStoreUserStatus) @@ -1750,6 +1755,11 @@ func (in *FilesystemsSpec) DeepCopy() *FilesystemsSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GaneshaRADOSSpec) DeepCopyInto(out *GaneshaRADOSSpec) { *out = *in + if in.PoolConfig != nil { + in, out := &in.PoolConfig, &out.PoolConfig + *out = new(PoolSpec) + (*in).DeepCopyInto(*out) + } return } @@ -2067,6 +2077,11 @@ func (in *MirroringSpec) DeepCopyInto(out *MirroringSpec) { *out = make([]SnapshotScheduleSpec, len(*in)) copy(*out, *in) } + if in.Peers != nil { + in, out := &in.Peers, &out.Peers + *out = new(MirroringPeerSpec) + (*in).DeepCopyInto(*out) + } return } @@ -2165,7 +2180,7 @@ func (in *MonitoringSpec) DeepCopy() *MonitoringSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NFSGaneshaSpec) DeepCopyInto(out *NFSGaneshaSpec) { *out = *in - out.RADOS = in.RADOS + in.RADOS.DeepCopyInto(&out.RADOS) in.Server.DeepCopyInto(&out.Server) return } @@ -2331,6 +2346,16 @@ func (in *ObjectStoreStatus) DeepCopy() *ObjectStoreStatus { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ObjectStoreUserSpec) DeepCopyInto(out *ObjectStoreUserSpec) { *out = *in + if in.Capabilities != nil { + in, out := &in.Capabilities, &out.Capabilities + *out = new(ObjectUserCapSpec) + **out = **in + } + if in.Quotas != nil { + in, out := &in.Quotas, &out.Quotas + *out = new(ObjectUserQuotaSpec) + (*in).DeepCopyInto(*out) + } return } @@ -2367,6 +2392,53 @@ func (in *ObjectStoreUserStatus) DeepCopy() *ObjectStoreUserStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ObjectUserCapSpec) DeepCopyInto(out *ObjectUserCapSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectUserCapSpec. +func (in *ObjectUserCapSpec) DeepCopy() *ObjectUserCapSpec { + if in == nil { + return nil + } + out := new(ObjectUserCapSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectUserQuotaSpec) DeepCopyInto(out *ObjectUserQuotaSpec) { + *out = *in + if in.MaxBuckets != nil { + in, out := &in.MaxBuckets, &out.MaxBuckets + *out = new(int) + **out = **in + } + if in.MaxSize != nil { + in, out := &in.MaxSize, &out.MaxSize + x := (*in).DeepCopy() + *out = &x + } + if in.MaxObjects != nil { + in, out := &in.MaxObjects, &out.MaxObjects + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectUserQuotaSpec. +func (in *ObjectUserQuotaSpec) DeepCopy() *ObjectUserQuotaSpec { + if in == nil { + return nil + } + out := new(ObjectUserQuotaSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ObjectZoneGroupSpec) DeepCopyInto(out *ObjectZoneGroupSpec) { *out = *in diff --git a/pkg/apis/nfs.rook.io/register.go b/pkg/apis/nfs.rook.io/register.go deleted file mode 100644 index 0a7b43d6d3c9..000000000000 --- a/pkg/apis/nfs.rook.io/register.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package nfsrookio - -const ( - CustomResourceGroupName = "nfs.rook.io" -) diff --git a/pkg/apis/nfs.rook.io/v1alpha1/doc.go b/pkg/apis/nfs.rook.io/v1alpha1/doc.go deleted file mode 100644 index c629ac00c430..000000000000 --- a/pkg/apis/nfs.rook.io/v1alpha1/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package,register - -// Package v1alpha1 is the v1alpha1 version of the API. -// +groupName=nfs.rook.io -package v1alpha1 diff --git a/pkg/apis/nfs.rook.io/v1alpha1/register.go b/pkg/apis/nfs.rook.io/v1alpha1/register.go deleted file mode 100644 index a44e66114fa0..000000000000 --- a/pkg/apis/nfs.rook.io/v1alpha1/register.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - - nfsrookio "github.com/rook/rook/pkg/apis/nfs.rook.io" -) - -const ( - CustomResourceGroup = "nfs.rook.io" - Version = "v1alpha1" -) - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: nfsrookio.CustomResourceGroupName, Version: Version} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // SchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder runtime.SchemeBuilder - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addKnownTypes) -} - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &NFSServer{}, - &NFSServerList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/pkg/apis/nfs.rook.io/v1alpha1/types.go b/pkg/apis/nfs.rook.io/v1alpha1/types.go deleted file mode 100644 index 6aee04443f5c..000000000000 --- a/pkg/apis/nfs.rook.io/v1alpha1/types.go +++ /dev/null @@ -1,144 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package v1alpha1 - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// *************************************************************************** -// IMPORTANT FOR CODE GENERATION -// If the types in this file are updated, you will need to run -// `make codegen` to generate the new types under the client/clientset folder. 
-// *************************************************************************** - -const ( - Finalizer = "nfsserver.nfs.rook.io" -) - -const ( - EventCreated = "Created" - EventUpdated = "Updated" - EventFailed = "Failed" -) - -type NFSServerState string - -const ( - StateInitializing NFSServerState = "Initializing" - StatePending NFSServerState = "Pending" - StateRunning NFSServerState = "Running" - StateError NFSServerState = "Error" -) - -// NFSServerStatus defines the observed state of NFSServer -type NFSServerStatus struct { - State NFSServerState `json:"state,omitempty"` - Message string `json:"message,omitempty"` - Reason string `json:"reason,omitempty"` -} - -// +genclient -// +genclient:noStatus -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:object:root=true -// +kubebuilder:subresource:status -// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" -// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.state",description="NFS Server instance state" - -// NFSServer is the Schema for the nfsservers API -type NFSServer struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec NFSServerSpec `json:"spec,omitempty"` - Status NFSServerStatus `json:"status,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:object:root=true - -// NFSServerList contains a list of NFSServer -type NFSServerList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - Items []NFSServer `json:"items"` -} - -// NFSServerSpec represents the spec of NFS daemon -type NFSServerSpec struct { - // The annotations-related configuration to add/set on each Pod related object. 
- Annotations map[string]string `json:"annotations,omitempty"` - - // Replicas of the NFS daemon - Replicas int `json:"replicas,omitempty"` - - // The parameters to configure the NFS export - Exports []ExportsSpec `json:"exports,omitempty"` -} - -// ExportsSpec represents the spec of NFS exports -type ExportsSpec struct { - // Name of the export - Name string `json:"name,omitempty"` - - // The NFS server configuration - Server ServerSpec `json:"server,omitempty"` - - // PVC from which the NFS daemon gets storage for sharing - PersistentVolumeClaim v1.PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty"` -} - -// ServerSpec represents the spec for configuring the NFS server -type ServerSpec struct { - // Reading and Writing permissions on the export - // Valid values are "ReadOnly", "ReadWrite" and "none" - // +kubebuilder:validation:Enum=ReadOnly;ReadWrite;none - AccessMode string `json:"accessMode,omitempty"` - - // This prevents the root users connected remotely from having root privileges - // Valid values are "none", "rootid", "root", and "all" - // +kubebuilder:validation:Enum=none;rootid;root;all - Squash string `json:"squash,omitempty"` - - // The clients allowed to access the NFS export - // +optional - AllowedClients []AllowedClientsSpec `json:"allowedClients,omitempty"` -} - -// AllowedClientsSpec represents the client specs for accessing the NFS export -type AllowedClientsSpec struct { - - // Name of the clients group - Name string `json:"name,omitempty"` - - // The clients that can access the share - // Values can be hostname, ip address, netgroup, CIDR network address, or all - Clients []string `json:"clients,omitempty"` - - // Reading and Writing permissions for the client to access the NFS export - // Valid values are "ReadOnly", "ReadWrite" and "none" - // Gets overridden when ServerSpec.accessMode is specified - // +kubebuilder:validation:Enum=ReadOnly;ReadWrite;none - AccessMode string `json:"accessMode,omitempty"` - - // Squash options for clients - // Valid values are "none", "rootid", "root", and "all" - // Gets overridden when ServerSpec.squash is specified - // +kubebuilder:validation:Enum=none;rootid;root;all - Squash string `json:"squash,omitempty"` -} diff --git a/pkg/apis/nfs.rook.io/v1alpha1/webhook.go b/pkg/apis/nfs.rook.io/v1alpha1/webhook.go deleted file mode 100644 index a4943cde8ba5..000000000000 --- a/pkg/apis/nfs.rook.io/v1alpha1/webhook.go +++ /dev/null @@ -1,195 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "strings" - - "github.com/coreos/pkg/capnslog" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/validation/field" - "sigs.k8s.io/controller-runtime/pkg/webhook" - "sigs.k8s.io/kustomize/kyaml/sets" -) - -var ( - webhookName = "nfs-webhook" - logger = capnslog.NewPackageLogger("github.com/rook/rook", webhookName) -) - -// compile-time assertions ensures NFSServer implements webhook.Defaulter so a webhook builder -// will be registered for the mutating webhook. -var _ webhook.Defaulter = &NFSServer{} - -// Default implements webhook.Defaulter contains mutating webhook admission logic. -func (r *NFSServer) Default() { - logger.Info("default", "name", r.Name) - logger.Warning("defaulting is not supported yet") -} - -// compile-time assertions ensures NFSServer implements webhook.Validator so a webhook builder -// will be registered for the validating webhook. -var _ webhook.Validator = &NFSServer{} - -// ValidateCreate implements webhook.Validator contains validating webhook admission logic for CREATE operation -func (r *NFSServer) ValidateCreate() error { - logger.Info("validate create", "name", r.Name) - - if err := r.ValidateSpec(); err != nil { - return err - } - - return nil -} - -// ValidateUpdate implements webhook.Validator contains validating webhook admission logic for UPDATE operation -func (r *NFSServer) ValidateUpdate(old runtime.Object) error { - logger.Info("validate update", "name", r.Name) - - if err := r.ValidateSpec(); err != nil { - return err - } - - return nil -} - -// ValidateDelete implements webhook.Validator contains validating webhook admission logic for DELETE operation -func (r *NFSServer) ValidateDelete() error { - logger.Info("validate delete", "name", r.Name) - logger.Warning("validating delete event is not supported") - - return nil -} - -// ValidateSpec validate NFSServer spec. -func (r *NFSServer) ValidateSpec() error { - var allErrs field.ErrorList - - spec := r.Spec - specPath := field.NewPath("spec") - allErrs = append(allErrs, spec.validateExports(specPath)...) - - return allErrs.ToAggregate() -} - -func (r *NFSServerSpec) validateExports(parentPath *field.Path) field.ErrorList { - var allErrs field.ErrorList - - exportsPath := parentPath.Child("exports") - allNames := sets.String{} - allPVCNames := sets.String{} - for i, export := range r.Exports { - idxPath := exportsPath.Index(i) - namePath := idxPath.Child("name") - errList := field.ErrorList{} - if allNames.Has(export.Name) { - errList = append(errList, field.Duplicate(namePath, export.Name)) - } - - pvcNamePath := idxPath.Child("persistentVolumeClaim", "claimName") - if allPVCNames.Has(export.PersistentVolumeClaim.ClaimName) { - errList = append(errList, field.Duplicate(pvcNamePath, export.PersistentVolumeClaim.ClaimName)) - } - - if len(errList) == 0 { - allNames.Insert(export.Name) - allPVCNames.Insert(export.PersistentVolumeClaim.ClaimName) - } else { - allErrs = append(allErrs, errList...) - } - - allErrs = append(allErrs, export.validateServer(idxPath)...) 
- } - - return allErrs -} - -func (r *ExportsSpec) validateServer(parentPath *field.Path) field.ErrorList { - var allErrs field.ErrorList - - server := r.Server - serverPath := parentPath.Child("server") - accessModePath := serverPath.Child("accessMode") - if err := validateAccessMode(accessModePath, server.AccessMode); err != nil { - allErrs = append(allErrs, err) - } - - squashPath := serverPath.Child("squash") - if err := validateSquashMode(squashPath, server.Squash); err != nil { - allErrs = append(allErrs, err) - } - - allErrs = append(allErrs, server.validateAllowedClient(serverPath)...) - - return allErrs -} - -func (r *ServerSpec) validateAllowedClient(parentPath *field.Path) field.ErrorList { - var allErrs field.ErrorList - - allowedClientsPath := parentPath.Child("allowedClients") - allNames := sets.String{} - for i, allowedClient := range r.AllowedClients { - idxPath := allowedClientsPath.Index(i) - namePath := idxPath.Child("name") - errList := field.ErrorList{} - if allNames.Has(allowedClient.Name) { - errList = append(errList, field.Duplicate(namePath, allowedClient.Name)) - } - - if len(errList) == 0 { - allNames.Insert(allowedClient.Name) - } else { - allErrs = append(allErrs, errList...) - } - - accessModePath := idxPath.Child("accessMode") - if err := validateAccessMode(accessModePath, allowedClient.AccessMode); err != nil { - allErrs = append(allErrs, err) - } - - squashPath := idxPath.Child("squash") - if err := validateSquashMode(squashPath, allowedClient.Squash); err != nil { - allErrs = append(allErrs, err) - } - } - - return allErrs -} - -func validateAccessMode(path *field.Path, mode string) *field.Error { - switch strings.ToLower(mode) { - case "readonly": - case "readwrite": - case "none": - default: - return field.Invalid(path, mode, "valid values are (ReadOnly, ReadWrite, none)") - } - return nil -} - -func validateSquashMode(path *field.Path, mode string) *field.Error { - switch strings.ToLower(mode) { - case "rootid": - case "root": - case "all": - case "none": - default: - return field.Invalid(path, mode, "valid values are (none, rootId, root, all)") - } - return nil -} diff --git a/pkg/apis/nfs.rook.io/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/nfs.rook.io/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 7294f0cda96b..000000000000 --- a/pkg/apis/nfs.rook.io/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,194 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AllowedClientsSpec) DeepCopyInto(out *AllowedClientsSpec) { - *out = *in - if in.Clients != nil { - in, out := &in.Clients, &out.Clients - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedClientsSpec. -func (in *AllowedClientsSpec) DeepCopy() *AllowedClientsSpec { - if in == nil { - return nil - } - out := new(AllowedClientsSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExportsSpec) DeepCopyInto(out *ExportsSpec) { - *out = *in - in.Server.DeepCopyInto(&out.Server) - out.PersistentVolumeClaim = in.PersistentVolumeClaim - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExportsSpec. -func (in *ExportsSpec) DeepCopy() *ExportsSpec { - if in == nil { - return nil - } - out := new(ExportsSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NFSServer) DeepCopyInto(out *NFSServer) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NFSServer. -func (in *NFSServer) DeepCopy() *NFSServer { - if in == nil { - return nil - } - out := new(NFSServer) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NFSServer) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NFSServerList) DeepCopyInto(out *NFSServerList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]NFSServer, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NFSServerList. -func (in *NFSServerList) DeepCopy() *NFSServerList { - if in == nil { - return nil - } - out := new(NFSServerList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NFSServerList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NFSServerSpec) DeepCopyInto(out *NFSServerSpec) { - *out = *in - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Exports != nil { - in, out := &in.Exports, &out.Exports - *out = make([]ExportsSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NFSServerSpec. 
-func (in *NFSServerSpec) DeepCopy() *NFSServerSpec { - if in == nil { - return nil - } - out := new(NFSServerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NFSServerStatus) DeepCopyInto(out *NFSServerStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NFSServerStatus. -func (in *NFSServerStatus) DeepCopy() *NFSServerStatus { - if in == nil { - return nil - } - out := new(NFSServerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServerSpec) DeepCopyInto(out *ServerSpec) { - *out = *in - if in.AllowedClients != nil { - in, out := &in.AllowedClients, &out.AllowedClients - *out = make([]AllowedClientsSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSpec. -func (in *ServerSpec) DeepCopy() *ServerSpec { - if in == nil { - return nil - } - out := new(ServerSpec) - in.DeepCopyInto(out) - return out -} diff --git a/pkg/apis/rook.io/labels.go b/pkg/apis/rook.io/labels.go index 85a6de000673..5004613d0c2c 100644 --- a/pkg/apis/rook.io/labels.go +++ b/pkg/apis/rook.io/labels.go @@ -44,6 +44,16 @@ func (a Labels) ApplyToObjectMeta(t *metav1.ObjectMeta) { } } +// OverwriteApplyToObjectMeta adds labels to object meta, overwriting keys that are already defined. +func (a Labels) OverwriteApplyToObjectMeta(t *metav1.ObjectMeta) { + if t.Labels == nil { + t.Labels = map[string]string{} + } + for k, v := range a { + t.Labels[k] = v + } +} + // Merge returns a Labels which results from merging the attributes of the // original Labels with the attributes of the supplied one. The supplied // Labels attributes will override the original ones if defined. 
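A minimal sketch of the new OverwriteApplyToObjectMeta helper in use, assuming (as the new doc comment implies) that the existing ApplyToObjectMeta leaves keys that are already set untouched while the new variant replaces them; label values and imports shown are placeholders:

	// imports: rook "github.com/rook/rook/pkg/apis/rook.io", metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	meta := metav1.ObjectMeta{Labels: map[string]string{"app": "rook-ceph-mon"}}
	rook.Labels{"app": "custom"}.ApplyToObjectMeta(&meta)          // "app" stays "rook-ceph-mon": existing keys win
	rook.Labels{"app": "custom"}.OverwriteApplyToObjectMeta(&meta) // "app" becomes "custom": supplied keys win

The test added in labels_test.go below exercises exactly these three cases (empty meta, disjoint keys, and overlapping keys).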
diff --git a/pkg/apis/rook.io/labels_spec.go b/pkg/apis/rook.io/labels_test.go similarity index 69% rename from pkg/apis/rook.io/labels_spec.go rename to pkg/apis/rook.io/labels_test.go index aec8ce6415ca..7b219335843e 100644 --- a/pkg/apis/rook.io/labels_spec.go +++ b/pkg/apis/rook.io/labels_test.go @@ -77,6 +77,60 @@ func TestLabelsApply(t *testing.T) { } } +func TestLabelsOverwriteApply(t *testing.T) { + tcs := []struct { + name string + target *metav1.ObjectMeta + input Labels + expected Labels + }{ + { + name: "it should be able to update meta with no label", + target: &metav1.ObjectMeta{}, + input: Labels{ + "foo": "bar", + }, + expected: Labels{ + "foo": "bar", + }, + }, + { + name: "it should keep the original labels when new labels are set", + target: &metav1.ObjectMeta{ + Labels: Labels{ + "foo": "bar", + }, + }, + input: Labels{ + "hello": "world", + }, + expected: Labels{ + "foo": "bar", + "hello": "world", + }, + }, + { + name: "it should overwrite the existing keys", + target: &metav1.ObjectMeta{ + Labels: Labels{ + "foo": "bar", + }, + }, + input: Labels{ + "foo": "baz", + }, + expected: Labels{ + "foo": "baz", + }, + }, + } + + for _, tc := range tcs { + tc.input.OverwriteApplyToObjectMeta(tc.target) + assert.Equal(t, map[string]string(tc.expected), tc.target.Labels) + } +} + func TestLabelsMerge(t *testing.T) { testLabelsPart1 := Labels{ "foo": "bar", diff --git a/pkg/client/clientset/versioned/clientset.go b/pkg/client/clientset/versioned/clientset.go index dbc6b4c15c23..2616737351a8 100644 --- a/pkg/client/clientset/versioned/clientset.go +++ b/pkg/client/clientset/versioned/clientset.go @@ -21,9 +21,7 @@ package versioned import ( "fmt" - cassandrav1alpha1 "github.com/rook/rook/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1" cephv1 "github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1" - nfsv1alpha1 "github.com/rook/rook/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1" rookv1alpha2 "github.com/rook/rook/pkg/client/clientset/versioned/typed/rook.io/v1alpha2" discovery "k8s.io/client-go/discovery" rest "k8s.io/client-go/rest" @@ -32,9 +30,7 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface - CassandraV1alpha1() cassandrav1alpha1.CassandraV1alpha1Interface CephV1() cephv1.CephV1Interface - NfsV1alpha1() nfsv1alpha1.NfsV1alpha1Interface RookV1alpha2() rookv1alpha2.RookV1alpha2Interface } @@ -42,15 +38,8 @@ type Interface interface { // version included in a Clientset. 
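// NOTE (editor's illustrative sketch, not part of the patch itself): with the Cassandra and NFS typed
// clients removed, callers reach the remaining APIs through CephV1() or RookV1alpha2(). A minimal
// sketch, assuming an in-cluster kubeconfig and a "rook-ceph" namespace as placeholders, and that the
// generated CephV1 client exposes a CephClusters getter as in the upstream clientset:
//
// imports: "context", "fmt", versioned "github.com/rook/rook/pkg/client/clientset/versioned",
//          metav1 "k8s.io/apimachinery/pkg/apis/meta/v1", "k8s.io/client-go/rest"
cfg, err := rest.InClusterConfig()
if err != nil {
	panic(err)
}
cs := versioned.NewForConfigOrDie(cfg)
clusters, err := cs.CephV1().CephClusters("rook-ceph").List(context.TODO(), metav1.ListOptions{})
if err != nil {
	panic(err)
}
fmt.Println(len(clusters.Items), "CephClusters found")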
type Clientset struct { *discovery.DiscoveryClient - cassandraV1alpha1 *cassandrav1alpha1.CassandraV1alpha1Client - cephV1 *cephv1.CephV1Client - nfsV1alpha1 *nfsv1alpha1.NfsV1alpha1Client - rookV1alpha2 *rookv1alpha2.RookV1alpha2Client -} - -// CassandraV1alpha1 retrieves the CassandraV1alpha1Client -func (c *Clientset) CassandraV1alpha1() cassandrav1alpha1.CassandraV1alpha1Interface { - return c.cassandraV1alpha1 + cephV1 *cephv1.CephV1Client + rookV1alpha2 *rookv1alpha2.RookV1alpha2Client } // CephV1 retrieves the CephV1Client @@ -58,11 +47,6 @@ func (c *Clientset) CephV1() cephv1.CephV1Interface { return c.cephV1 } -// NfsV1alpha1 retrieves the NfsV1alpha1Client -func (c *Clientset) NfsV1alpha1() nfsv1alpha1.NfsV1alpha1Interface { - return c.nfsV1alpha1 -} - // RookV1alpha2 retrieves the RookV1alpha2Client func (c *Clientset) RookV1alpha2() rookv1alpha2.RookV1alpha2Interface { return c.rookV1alpha2 @@ -89,18 +73,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { } var cs Clientset var err error - cs.cassandraV1alpha1, err = cassandrav1alpha1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } cs.cephV1, err = cephv1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } - cs.nfsV1alpha1, err = nfsv1alpha1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } cs.rookV1alpha2, err = rookv1alpha2.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -117,9 +93,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { // panics if there is an error in the config. func NewForConfigOrDie(c *rest.Config) *Clientset { var cs Clientset - cs.cassandraV1alpha1 = cassandrav1alpha1.NewForConfigOrDie(c) cs.cephV1 = cephv1.NewForConfigOrDie(c) - cs.nfsV1alpha1 = nfsv1alpha1.NewForConfigOrDie(c) cs.rookV1alpha2 = rookv1alpha2.NewForConfigOrDie(c) cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) @@ -129,9 +103,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { // New creates a new Clientset for the given RESTClient. 
func New(c rest.Interface) *Clientset { var cs Clientset - cs.cassandraV1alpha1 = cassandrav1alpha1.New(c) cs.cephV1 = cephv1.New(c) - cs.nfsV1alpha1 = nfsv1alpha1.New(c) cs.rookV1alpha2 = rookv1alpha2.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) diff --git a/pkg/client/clientset/versioned/fake/clientset_generated.go b/pkg/client/clientset/versioned/fake/clientset_generated.go index 9708881f54ee..3f0428607d57 100644 --- a/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -20,12 +20,8 @@ package fake import ( clientset "github.com/rook/rook/pkg/client/clientset/versioned" - cassandrav1alpha1 "github.com/rook/rook/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1" - fakecassandrav1alpha1 "github.com/rook/rook/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/fake" cephv1 "github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1" fakecephv1 "github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake" - nfsv1alpha1 "github.com/rook/rook/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1" - fakenfsv1alpha1 "github.com/rook/rook/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/fake" rookv1alpha2 "github.com/rook/rook/pkg/client/clientset/versioned/typed/rook.io/v1alpha2" fakerookv1alpha2 "github.com/rook/rook/pkg/client/clientset/versioned/typed/rook.io/v1alpha2/fake" "k8s.io/apimachinery/pkg/runtime" @@ -82,21 +78,11 @@ func (c *Clientset) Tracker() testing.ObjectTracker { var _ clientset.Interface = &Clientset{} -// CassandraV1alpha1 retrieves the CassandraV1alpha1Client -func (c *Clientset) CassandraV1alpha1() cassandrav1alpha1.CassandraV1alpha1Interface { - return &fakecassandrav1alpha1.FakeCassandraV1alpha1{Fake: &c.Fake} -} - // CephV1 retrieves the CephV1Client func (c *Clientset) CephV1() cephv1.CephV1Interface { return &fakecephv1.FakeCephV1{Fake: &c.Fake} } -// NfsV1alpha1 retrieves the NfsV1alpha1Client -func (c *Clientset) NfsV1alpha1() nfsv1alpha1.NfsV1alpha1Interface { - return &fakenfsv1alpha1.FakeNfsV1alpha1{Fake: &c.Fake} -} - // RookV1alpha2 retrieves the RookV1alpha2Client func (c *Clientset) RookV1alpha2() rookv1alpha2.RookV1alpha2Interface { return &fakerookv1alpha2.FakeRookV1alpha2{Fake: &c.Fake} diff --git a/pkg/client/clientset/versioned/fake/register.go b/pkg/client/clientset/versioned/fake/register.go index a3a07b0fcfc6..1212dc897749 100644 --- a/pkg/client/clientset/versioned/fake/register.go +++ b/pkg/client/clientset/versioned/fake/register.go @@ -19,9 +19,7 @@ limitations under the License. 
package fake import ( - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - nfsv1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1" rookv1alpha2 "github.com/rook/rook/pkg/apis/rook.io/v1alpha2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -34,9 +32,7 @@ var scheme = runtime.NewScheme() var codecs = serializer.NewCodecFactory(scheme) var localSchemeBuilder = runtime.SchemeBuilder{ - cassandrav1alpha1.AddToScheme, cephv1.AddToScheme, - nfsv1alpha1.AddToScheme, rookv1alpha2.AddToScheme, } diff --git a/pkg/client/clientset/versioned/scheme/register.go b/pkg/client/clientset/versioned/scheme/register.go index 1b4a713e4bd7..fe7c4dae7899 100644 --- a/pkg/client/clientset/versioned/scheme/register.go +++ b/pkg/client/clientset/versioned/scheme/register.go @@ -19,9 +19,7 @@ limitations under the License. package scheme import ( - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - nfsv1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1" rookv1alpha2 "github.com/rook/rook/pkg/apis/rook.io/v1alpha2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -34,9 +32,7 @@ var Scheme = runtime.NewScheme() var Codecs = serializer.NewCodecFactory(Scheme) var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ - cassandrav1alpha1.AddToScheme, cephv1.AddToScheme, - nfsv1alpha1.AddToScheme, rookv1alpha2.AddToScheme, } diff --git a/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/cassandra.rook.io_client.go b/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/cassandra.rook.io_client.go deleted file mode 100644 index 33a040f16489..000000000000 --- a/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/cassandra.rook.io_client.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - rest "k8s.io/client-go/rest" -) - -type CassandraV1alpha1Interface interface { - RESTClient() rest.Interface - ClustersGetter -} - -// CassandraV1alpha1Client is used to interact with features provided by the cassandra.rook.io group. -type CassandraV1alpha1Client struct { - restClient rest.Interface -} - -func (c *CassandraV1alpha1Client) Clusters(namespace string) ClusterInterface { - return newClusters(c, namespace) -} - -// NewForConfig creates a new CassandraV1alpha1Client for the given config. 
-func NewForConfig(c *rest.Config) (*CassandraV1alpha1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &CassandraV1alpha1Client{client}, nil -} - -// NewForConfigOrDie creates a new CassandraV1alpha1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *CassandraV1alpha1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new CassandraV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *CassandraV1alpha1Client { - return &CassandraV1alpha1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *CassandraV1alpha1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/cluster.go b/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/cluster.go deleted file mode 100644 index a08427fb9edd..000000000000 --- a/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/cluster.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - "time" - - v1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// ClustersGetter has a method to return a ClusterInterface. -// A group's client should implement this interface. -type ClustersGetter interface { - Clusters(namespace string) ClusterInterface -} - -// ClusterInterface has methods to work with Cluster resources. 
-type ClusterInterface interface { - Create(ctx context.Context, cluster *v1alpha1.Cluster, opts v1.CreateOptions) (*v1alpha1.Cluster, error) - Update(ctx context.Context, cluster *v1alpha1.Cluster, opts v1.UpdateOptions) (*v1alpha1.Cluster, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Cluster, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Cluster, err error) - ClusterExpansion -} - -// clusters implements ClusterInterface -type clusters struct { - client rest.Interface - ns string -} - -// newClusters returns a Clusters -func newClusters(c *CassandraV1alpha1Client, namespace string) *clusters { - return &clusters{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the cluster, and returns the corresponding cluster object, and an error if there is any. -func (c *clusters) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Cluster, err error) { - result = &v1alpha1.Cluster{} - err = c.client.Get(). - Namespace(c.ns). - Resource("clusters"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Clusters that match those selectors. -func (c *clusters) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ClusterList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("clusters"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusters. -func (c *clusters) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("clusters"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cluster and creates it. Returns the server's representation of the cluster, and an error, if there is any. -func (c *clusters) Create(ctx context.Context, cluster *v1alpha1.Cluster, opts v1.CreateOptions) (result *v1alpha1.Cluster, err error) { - result = &v1alpha1.Cluster{} - err = c.client.Post(). - Namespace(c.ns). - Resource("clusters"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cluster). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cluster and updates it. Returns the server's representation of the cluster, and an error, if there is any. -func (c *clusters) Update(ctx context.Context, cluster *v1alpha1.Cluster, opts v1.UpdateOptions) (result *v1alpha1.Cluster, err error) { - result = &v1alpha1.Cluster{} - err = c.client.Put(). - Namespace(c.ns). - Resource("clusters"). - Name(cluster.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cluster). 
- Do(ctx). - Into(result) - return -} - -// Delete takes name of the cluster and deletes it. Returns an error if one occurs. -func (c *clusters) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("clusters"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusters) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("clusters"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cluster. -func (c *clusters) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Cluster, err error) { - result = &v1alpha1.Cluster{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("clusters"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/doc.go b/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/doc.go deleted file mode 100644 index df51baa4d4c1..000000000000 --- a/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1alpha1 diff --git a/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/fake/doc.go b/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/fake/doc.go deleted file mode 100644 index 16f44399065e..000000000000 --- a/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. 
-package fake diff --git a/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/fake/fake_cassandra.rook.io_client.go b/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/fake/fake_cassandra.rook.io_client.go deleted file mode 100644 index 39a28ca2dfe5..000000000000 --- a/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/fake/fake_cassandra.rook.io_client.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1alpha1 "github.com/rook/rook/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeCassandraV1alpha1 struct { - *testing.Fake -} - -func (c *FakeCassandraV1alpha1) Clusters(namespace string) v1alpha1.ClusterInterface { - return &FakeClusters{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeCassandraV1alpha1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/fake/fake_cluster.go b/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/fake/fake_cluster.go deleted file mode 100644 index 6b2493c56725..000000000000 --- a/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/fake/fake_cluster.go +++ /dev/null @@ -1,130 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - v1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeClusters implements ClusterInterface -type FakeClusters struct { - Fake *FakeCassandraV1alpha1 - ns string -} - -var clustersResource = schema.GroupVersionResource{Group: "cassandra.rook.io", Version: "v1alpha1", Resource: "clusters"} - -var clustersKind = schema.GroupVersionKind{Group: "cassandra.rook.io", Version: "v1alpha1", Kind: "Cluster"} - -// Get takes name of the cluster, and returns the corresponding cluster object, and an error if there is any. 
-func (c *FakeClusters) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Cluster, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(clustersResource, c.ns, name), &v1alpha1.Cluster{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Cluster), err -} - -// List takes label and field selectors, and returns the list of Clusters that match those selectors. -func (c *FakeClusters) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(clustersResource, clustersKind, c.ns, opts), &v1alpha1.ClusterList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.ClusterList{ListMeta: obj.(*v1alpha1.ClusterList).ListMeta} - for _, item := range obj.(*v1alpha1.ClusterList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested clusters. -func (c *FakeClusters) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(clustersResource, c.ns, opts)) - -} - -// Create takes the representation of a cluster and creates it. Returns the server's representation of the cluster, and an error, if there is any. -func (c *FakeClusters) Create(ctx context.Context, cluster *v1alpha1.Cluster, opts v1.CreateOptions) (result *v1alpha1.Cluster, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(clustersResource, c.ns, cluster), &v1alpha1.Cluster{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Cluster), err -} - -// Update takes the representation of a cluster and updates it. Returns the server's representation of the cluster, and an error, if there is any. -func (c *FakeClusters) Update(ctx context.Context, cluster *v1alpha1.Cluster, opts v1.UpdateOptions) (result *v1alpha1.Cluster, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(clustersResource, c.ns, cluster), &v1alpha1.Cluster{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Cluster), err -} - -// Delete takes name of the cluster and deletes it. Returns an error if one occurs. -func (c *FakeClusters) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(clustersResource, c.ns, name), &v1alpha1.Cluster{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeClusters) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(clustersResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.ClusterList{}) - return err -} - -// Patch applies the patch and returns the patched cluster. -func (c *FakeClusters) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Cluster, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(clustersResource, c.ns, name, pt, data, subresources...), &v1alpha1.Cluster{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Cluster), err -} diff --git a/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/generated_expansion.go b/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/generated_expansion.go deleted file mode 100644 index fcf4a33967fa..000000000000 --- a/pkg/client/clientset/versioned/typed/cassandra.rook.io/v1alpha1/generated_expansion.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -type ClusterExpansion interface{} diff --git a/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/doc.go b/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/doc.go deleted file mode 100644 index df51baa4d4c1..000000000000 --- a/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1alpha1 diff --git a/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/fake/doc.go b/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/fake/doc.go deleted file mode 100644 index 16f44399065e..000000000000 --- a/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. 
-package fake diff --git a/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/fake/fake_nfs.rook.io_client.go b/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/fake/fake_nfs.rook.io_client.go deleted file mode 100644 index 547010476a05..000000000000 --- a/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/fake/fake_nfs.rook.io_client.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1alpha1 "github.com/rook/rook/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeNfsV1alpha1 struct { - *testing.Fake -} - -func (c *FakeNfsV1alpha1) NFSServers(namespace string) v1alpha1.NFSServerInterface { - return &FakeNFSServers{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeNfsV1alpha1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/fake/fake_nfsserver.go b/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/fake/fake_nfsserver.go deleted file mode 100644 index c17661995dff..000000000000 --- a/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/fake/fake_nfsserver.go +++ /dev/null @@ -1,130 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - v1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeNFSServers implements NFSServerInterface -type FakeNFSServers struct { - Fake *FakeNfsV1alpha1 - ns string -} - -var nfsserversResource = schema.GroupVersionResource{Group: "nfs.rook.io", Version: "v1alpha1", Resource: "nfsservers"} - -var nfsserversKind = schema.GroupVersionKind{Group: "nfs.rook.io", Version: "v1alpha1", Kind: "NFSServer"} - -// Get takes name of the nFSServer, and returns the corresponding nFSServer object, and an error if there is any. 
-func (c *FakeNFSServers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.NFSServer, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(nfsserversResource, c.ns, name), &v1alpha1.NFSServer{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.NFSServer), err -} - -// List takes label and field selectors, and returns the list of NFSServers that match those selectors. -func (c *FakeNFSServers) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.NFSServerList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(nfsserversResource, nfsserversKind, c.ns, opts), &v1alpha1.NFSServerList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.NFSServerList{ListMeta: obj.(*v1alpha1.NFSServerList).ListMeta} - for _, item := range obj.(*v1alpha1.NFSServerList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested nFSServers. -func (c *FakeNFSServers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(nfsserversResource, c.ns, opts)) - -} - -// Create takes the representation of a nFSServer and creates it. Returns the server's representation of the nFSServer, and an error, if there is any. -func (c *FakeNFSServers) Create(ctx context.Context, nFSServer *v1alpha1.NFSServer, opts v1.CreateOptions) (result *v1alpha1.NFSServer, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(nfsserversResource, c.ns, nFSServer), &v1alpha1.NFSServer{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.NFSServer), err -} - -// Update takes the representation of a nFSServer and updates it. Returns the server's representation of the nFSServer, and an error, if there is any. -func (c *FakeNFSServers) Update(ctx context.Context, nFSServer *v1alpha1.NFSServer, opts v1.UpdateOptions) (result *v1alpha1.NFSServer, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(nfsserversResource, c.ns, nFSServer), &v1alpha1.NFSServer{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.NFSServer), err -} - -// Delete takes name of the nFSServer and deletes it. Returns an error if one occurs. -func (c *FakeNFSServers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(nfsserversResource, c.ns, name), &v1alpha1.NFSServer{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeNFSServers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(nfsserversResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.NFSServerList{}) - return err -} - -// Patch applies the patch and returns the patched nFSServer. -func (c *FakeNFSServers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.NFSServer, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(nfsserversResource, c.ns, name, pt, data, subresources...), &v1alpha1.NFSServer{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.NFSServer), err -} diff --git a/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/generated_expansion.go b/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/generated_expansion.go deleted file mode 100644 index 39cd4986fd96..000000000000 --- a/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/generated_expansion.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -type NFSServerExpansion interface{} diff --git a/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/nfs.rook.io_client.go b/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/nfs.rook.io_client.go deleted file mode 100644 index 53ab904498eb..000000000000 --- a/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/nfs.rook.io_client.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1" - "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - rest "k8s.io/client-go/rest" -) - -type NfsV1alpha1Interface interface { - RESTClient() rest.Interface - NFSServersGetter -} - -// NfsV1alpha1Client is used to interact with features provided by the nfs.rook.io group. -type NfsV1alpha1Client struct { - restClient rest.Interface -} - -func (c *NfsV1alpha1Client) NFSServers(namespace string) NFSServerInterface { - return newNFSServers(c, namespace) -} - -// NewForConfig creates a new NfsV1alpha1Client for the given config. -func NewForConfig(c *rest.Config) (*NfsV1alpha1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &NfsV1alpha1Client{client}, nil -} - -// NewForConfigOrDie creates a new NfsV1alpha1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *NfsV1alpha1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new NfsV1alpha1Client for the given RESTClient. 
-func New(c rest.Interface) *NfsV1alpha1Client { - return &NfsV1alpha1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *NfsV1alpha1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/nfsserver.go b/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/nfsserver.go deleted file mode 100644 index 8cbfd05a9497..000000000000 --- a/pkg/client/clientset/versioned/typed/nfs.rook.io/v1alpha1/nfsserver.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - "time" - - v1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1" - scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// NFSServersGetter has a method to return a NFSServerInterface. -// A group's client should implement this interface. -type NFSServersGetter interface { - NFSServers(namespace string) NFSServerInterface -} - -// NFSServerInterface has methods to work with NFSServer resources. -type NFSServerInterface interface { - Create(ctx context.Context, nFSServer *v1alpha1.NFSServer, opts v1.CreateOptions) (*v1alpha1.NFSServer, error) - Update(ctx context.Context, nFSServer *v1alpha1.NFSServer, opts v1.UpdateOptions) (*v1alpha1.NFSServer, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.NFSServer, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.NFSServerList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.NFSServer, err error) - NFSServerExpansion -} - -// nFSServers implements NFSServerInterface -type nFSServers struct { - client rest.Interface - ns string -} - -// newNFSServers returns a NFSServers -func newNFSServers(c *NfsV1alpha1Client, namespace string) *nFSServers { - return &nFSServers{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the nFSServer, and returns the corresponding nFSServer object, and an error if there is any. 
-func (c *nFSServers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.NFSServer, err error) { - result = &v1alpha1.NFSServer{} - err = c.client.Get(). - Namespace(c.ns). - Resource("nfsservers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of NFSServers that match those selectors. -func (c *nFSServers) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.NFSServerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.NFSServerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("nfsservers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested nFSServers. -func (c *nFSServers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("nfsservers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a nFSServer and creates it. Returns the server's representation of the nFSServer, and an error, if there is any. -func (c *nFSServers) Create(ctx context.Context, nFSServer *v1alpha1.NFSServer, opts v1.CreateOptions) (result *v1alpha1.NFSServer, err error) { - result = &v1alpha1.NFSServer{} - err = c.client.Post(). - Namespace(c.ns). - Resource("nfsservers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(nFSServer). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a nFSServer and updates it. Returns the server's representation of the nFSServer, and an error, if there is any. -func (c *nFSServers) Update(ctx context.Context, nFSServer *v1alpha1.NFSServer, opts v1.UpdateOptions) (result *v1alpha1.NFSServer, err error) { - result = &v1alpha1.NFSServer{} - err = c.client.Put(). - Namespace(c.ns). - Resource("nfsservers"). - Name(nFSServer.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(nFSServer). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the nFSServer and deletes it. Returns an error if one occurs. -func (c *nFSServers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("nfsservers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *nFSServers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("nfsservers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched nFSServer. -func (c *nFSServers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.NFSServer, err error) { - result = &v1alpha1.NFSServer{} - err = c.client.Patch(pt). - Namespace(c.ns). 
- Resource("nfsservers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/pkg/client/informers/externalversions/cassandra.rook.io/interface.go b/pkg/client/informers/externalversions/cassandra.rook.io/interface.go deleted file mode 100644 index e8a00018a9f7..000000000000 --- a/pkg/client/informers/externalversions/cassandra.rook.io/interface.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package cassandra - -import ( - v1alpha1 "github.com/rook/rook/pkg/client/informers/externalversions/cassandra.rook.io/v1alpha1" - internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" -) - -// Interface provides access to each of this group's versions. -type Interface interface { - // V1alpha1 provides access to shared informers for resources in V1alpha1. - V1alpha1() v1alpha1.Interface -} - -type group struct { - factory internalinterfaces.SharedInformerFactory - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// V1alpha1 returns a new v1alpha1.Interface. -func (g *group) V1alpha1() v1alpha1.Interface { - return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) -} diff --git a/pkg/client/informers/externalversions/cassandra.rook.io/v1alpha1/cluster.go b/pkg/client/informers/externalversions/cassandra.rook.io/v1alpha1/cluster.go deleted file mode 100644 index 368e176ce2d2..000000000000 --- a/pkg/client/informers/externalversions/cassandra.rook.io/v1alpha1/cluster.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - "context" - time "time" - - cassandrarookiov1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - versioned "github.com/rook/rook/pkg/client/clientset/versioned" - internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" - v1alpha1 "github.com/rook/rook/pkg/client/listers/cassandra.rook.io/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// ClusterInformer provides access to a shared informer and lister for -// Clusters. -type ClusterInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.ClusterLister -} - -type clusterInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewClusterInformer constructs a new informer for Cluster type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewClusterInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredClusterInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredClusterInformer constructs a new informer for Cluster type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredClusterInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.CassandraV1alpha1().Clusters(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.CassandraV1alpha1().Clusters(namespace).Watch(context.TODO(), options) - }, - }, - &cassandrarookiov1alpha1.Cluster{}, - resyncPeriod, - indexers, - ) -} - -func (f *clusterInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredClusterInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *clusterInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&cassandrarookiov1alpha1.Cluster{}, f.defaultInformer) -} - -func (f *clusterInformer) Lister() v1alpha1.ClusterLister { - return v1alpha1.NewClusterLister(f.Informer().GetIndexer()) -} diff --git a/pkg/client/informers/externalversions/cassandra.rook.io/v1alpha1/interface.go b/pkg/client/informers/externalversions/cassandra.rook.io/v1alpha1/interface.go deleted file mode 100644 index f5556f18202e..000000000000 --- a/pkg/client/informers/externalversions/cassandra.rook.io/v1alpha1/interface.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" -) - -// Interface provides access to all the informers in this group version. -type Interface interface { - // Clusters returns a ClusterInformer. - Clusters() ClusterInformer -} - -type version struct { - factory internalinterfaces.SharedInformerFactory - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// Clusters returns a ClusterInformer. -func (v *version) Clusters() ClusterInformer { - return &clusterInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} diff --git a/pkg/client/informers/externalversions/factory.go b/pkg/client/informers/externalversions/factory.go index ca31ecada5ad..67e44ed9ebf0 100644 --- a/pkg/client/informers/externalversions/factory.go +++ b/pkg/client/informers/externalversions/factory.go @@ -24,10 +24,8 @@ import ( time "time" versioned "github.com/rook/rook/pkg/client/clientset/versioned" - cassandrarookio "github.com/rook/rook/pkg/client/informers/externalversions/cassandra.rook.io" cephrookio "github.com/rook/rook/pkg/client/informers/externalversions/ceph.rook.io" internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" - nfsrookio "github.com/rook/rook/pkg/client/informers/externalversions/nfs.rook.io" rookio "github.com/rook/rook/pkg/client/informers/externalversions/rook.io" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -175,24 +173,14 @@ type SharedInformerFactory interface { ForResource(resource schema.GroupVersionResource) (GenericInformer, error) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool - Cassandra() cassandrarookio.Interface Ceph() cephrookio.Interface - Nfs() nfsrookio.Interface Rook() rookio.Interface } -func (f *sharedInformerFactory) Cassandra() cassandrarookio.Interface { - return cassandrarookio.New(f, f.namespace, f.tweakListOptions) -} - func (f *sharedInformerFactory) Ceph() cephrookio.Interface { return cephrookio.New(f, f.namespace, f.tweakListOptions) } -func (f *sharedInformerFactory) Nfs() nfsrookio.Interface { - return nfsrookio.New(f, f.namespace, f.tweakListOptions) -} - func (f *sharedInformerFactory) Rook() rookio.Interface { return rookio.New(f, f.namespace, f.tweakListOptions) } diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go index 98a981182151..3c39476579b4 100644 --- a/pkg/client/informers/externalversions/generic.go +++ b/pkg/client/informers/externalversions/generic.go @@ -21,9 +21,7 @@ package externalversions 
import ( "fmt" - v1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - nfsrookiov1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1" v1alpha2 "github.com/rook/rook/pkg/apis/rook.io/v1alpha2" schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" @@ -55,11 +53,7 @@ func (f *genericInformer) Lister() cache.GenericLister { // TODO extend this to unknown resources with a client pool func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { - // Group=cassandra.rook.io, Version=v1alpha1 - case v1alpha1.SchemeGroupVersion.WithResource("clusters"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Cassandra().V1alpha1().Clusters().Informer()}, nil - - // Group=ceph.rook.io, Version=v1 + // Group=ceph.rook.io, Version=v1 case v1.SchemeGroupVersion.WithResource("cephblockpools"): return &genericInformer{resource: resource.GroupResource(), informer: f.Ceph().V1().CephBlockPools().Informer()}, nil case v1.SchemeGroupVersion.WithResource("cephclients"): @@ -85,10 +79,6 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case v1.SchemeGroupVersion.WithResource("cephrbdmirrors"): return &genericInformer{resource: resource.GroupResource(), informer: f.Ceph().V1().CephRBDMirrors().Informer()}, nil - // Group=nfs.rook.io, Version=v1alpha1 - case nfsrookiov1alpha1.SchemeGroupVersion.WithResource("nfsservers"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Nfs().V1alpha1().NFSServers().Informer()}, nil - // Group=rook.io, Version=v1alpha2 case v1alpha2.SchemeGroupVersion.WithResource("volumes"): return &genericInformer{resource: resource.GroupResource(), informer: f.Rook().V1alpha2().Volumes().Informer()}, nil diff --git a/pkg/client/informers/externalversions/nfs.rook.io/interface.go b/pkg/client/informers/externalversions/nfs.rook.io/interface.go deleted file mode 100644 index 1e9c18384225..000000000000 --- a/pkg/client/informers/externalversions/nfs.rook.io/interface.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package nfs - -import ( - internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" - v1alpha1 "github.com/rook/rook/pkg/client/informers/externalversions/nfs.rook.io/v1alpha1" -) - -// Interface provides access to each of this group's versions. -type Interface interface { - // V1alpha1 provides access to shared informers for resources in V1alpha1. - V1alpha1() v1alpha1.Interface -} - -type group struct { - factory internalinterfaces.SharedInformerFactory - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new Interface. 
-func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// V1alpha1 returns a new v1alpha1.Interface. -func (g *group) V1alpha1() v1alpha1.Interface { - return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) -} diff --git a/pkg/client/informers/externalversions/nfs.rook.io/v1alpha1/interface.go b/pkg/client/informers/externalversions/nfs.rook.io/v1alpha1/interface.go deleted file mode 100644 index c0687a846048..000000000000 --- a/pkg/client/informers/externalversions/nfs.rook.io/v1alpha1/interface.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" -) - -// Interface provides access to all the informers in this group version. -type Interface interface { - // NFSServers returns a NFSServerInformer. - NFSServers() NFSServerInformer -} - -type version struct { - factory internalinterfaces.SharedInformerFactory - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// NFSServers returns a NFSServerInformer. -func (v *version) NFSServers() NFSServerInformer { - return &nFSServerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} diff --git a/pkg/client/informers/externalversions/nfs.rook.io/v1alpha1/nfsserver.go b/pkg/client/informers/externalversions/nfs.rook.io/v1alpha1/nfsserver.go deleted file mode 100644 index d474dd54a6ec..000000000000 --- a/pkg/client/informers/externalversions/nfs.rook.io/v1alpha1/nfsserver.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - "context" - time "time" - - nfsrookiov1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1" - versioned "github.com/rook/rook/pkg/client/clientset/versioned" - internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" - v1alpha1 "github.com/rook/rook/pkg/client/listers/nfs.rook.io/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// NFSServerInformer provides access to a shared informer and lister for -// NFSServers. -type NFSServerInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.NFSServerLister -} - -type nFSServerInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewNFSServerInformer constructs a new informer for NFSServer type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewNFSServerInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredNFSServerInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredNFSServerInformer constructs a new informer for NFSServer type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredNFSServerInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.NfsV1alpha1().NFSServers(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.NfsV1alpha1().NFSServers(namespace).Watch(context.TODO(), options) - }, - }, - &nfsrookiov1alpha1.NFSServer{}, - resyncPeriod, - indexers, - ) -} - -func (f *nFSServerInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredNFSServerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *nFSServerInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&nfsrookiov1alpha1.NFSServer{}, f.defaultInformer) -} - -func (f *nFSServerInformer) Lister() v1alpha1.NFSServerLister { - return v1alpha1.NewNFSServerLister(f.Informer().GetIndexer()) -} diff --git a/pkg/client/listers/cassandra.rook.io/v1alpha1/cluster.go b/pkg/client/listers/cassandra.rook.io/v1alpha1/cluster.go deleted file mode 100644 index da83fc9a5252..000000000000 --- a/pkg/client/listers/cassandra.rook.io/v1alpha1/cluster.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// ClusterLister helps list Clusters. -// All objects returned here must be treated as read-only. -type ClusterLister interface { - // List lists all Clusters in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.Cluster, err error) - // Clusters returns an object that can list and get Clusters. - Clusters(namespace string) ClusterNamespaceLister - ClusterListerExpansion -} - -// clusterLister implements the ClusterLister interface. -type clusterLister struct { - indexer cache.Indexer -} - -// NewClusterLister returns a new ClusterLister. -func NewClusterLister(indexer cache.Indexer) ClusterLister { - return &clusterLister{indexer: indexer} -} - -// List lists all Clusters in the indexer. -func (s *clusterLister) List(selector labels.Selector) (ret []*v1alpha1.Cluster, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.Cluster)) - }) - return ret, err -} - -// Clusters returns an object that can list and get Clusters. -func (s *clusterLister) Clusters(namespace string) ClusterNamespaceLister { - return clusterNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// ClusterNamespaceLister helps list and get Clusters. -// All objects returned here must be treated as read-only. -type ClusterNamespaceLister interface { - // List lists all Clusters in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.Cluster, err error) - // Get retrieves the Cluster from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.Cluster, error) - ClusterNamespaceListerExpansion -} - -// clusterNamespaceLister implements the ClusterNamespaceLister -// interface. -type clusterNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Clusters in the indexer for a given namespace. -func (s clusterNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Cluster, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.Cluster)) - }) - return ret, err -} - -// Get retrieves the Cluster from the indexer for a given namespace and name. 
-func (s clusterNamespaceLister) Get(name string) (*v1alpha1.Cluster, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("cluster"), name) - } - return obj.(*v1alpha1.Cluster), nil -} diff --git a/pkg/client/listers/cassandra.rook.io/v1alpha1/expansion_generated.go b/pkg/client/listers/cassandra.rook.io/v1alpha1/expansion_generated.go deleted file mode 100644 index 5bd821b437e7..000000000000 --- a/pkg/client/listers/cassandra.rook.io/v1alpha1/expansion_generated.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -// ClusterListerExpansion allows custom methods to be added to -// ClusterLister. -type ClusterListerExpansion interface{} - -// ClusterNamespaceListerExpansion allows custom methods to be added to -// ClusterNamespaceLister. -type ClusterNamespaceListerExpansion interface{} diff --git a/pkg/client/listers/nfs.rook.io/v1alpha1/expansion_generated.go b/pkg/client/listers/nfs.rook.io/v1alpha1/expansion_generated.go deleted file mode 100644 index b89229e6203f..000000000000 --- a/pkg/client/listers/nfs.rook.io/v1alpha1/expansion_generated.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -// NFSServerListerExpansion allows custom methods to be added to -// NFSServerLister. -type NFSServerListerExpansion interface{} - -// NFSServerNamespaceListerExpansion allows custom methods to be added to -// NFSServerNamespaceLister. -type NFSServerNamespaceListerExpansion interface{} diff --git a/pkg/client/listers/nfs.rook.io/v1alpha1/nfsserver.go b/pkg/client/listers/nfs.rook.io/v1alpha1/nfsserver.go deleted file mode 100644 index f26f51d090be..000000000000 --- a/pkg/client/listers/nfs.rook.io/v1alpha1/nfsserver.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// NFSServerLister helps list NFSServers. -// All objects returned here must be treated as read-only. -type NFSServerLister interface { - // List lists all NFSServers in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.NFSServer, err error) - // NFSServers returns an object that can list and get NFSServers. - NFSServers(namespace string) NFSServerNamespaceLister - NFSServerListerExpansion -} - -// nFSServerLister implements the NFSServerLister interface. -type nFSServerLister struct { - indexer cache.Indexer -} - -// NewNFSServerLister returns a new NFSServerLister. -func NewNFSServerLister(indexer cache.Indexer) NFSServerLister { - return &nFSServerLister{indexer: indexer} -} - -// List lists all NFSServers in the indexer. -func (s *nFSServerLister) List(selector labels.Selector) (ret []*v1alpha1.NFSServer, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.NFSServer)) - }) - return ret, err -} - -// NFSServers returns an object that can list and get NFSServers. -func (s *nFSServerLister) NFSServers(namespace string) NFSServerNamespaceLister { - return nFSServerNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// NFSServerNamespaceLister helps list and get NFSServers. -// All objects returned here must be treated as read-only. -type NFSServerNamespaceLister interface { - // List lists all NFSServers in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.NFSServer, err error) - // Get retrieves the NFSServer from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.NFSServer, error) - NFSServerNamespaceListerExpansion -} - -// nFSServerNamespaceLister implements the NFSServerNamespaceLister -// interface. -type nFSServerNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all NFSServers in the indexer for a given namespace. -func (s nFSServerNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.NFSServer, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.NFSServer)) - }) - return ret, err -} - -// Get retrieves the NFSServer from the indexer for a given namespace and name. 
-func (s nFSServerNamespaceLister) Get(name string) (*v1alpha1.NFSServer, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("nfsserver"), name) - } - return obj.(*v1alpha1.NFSServer), nil -} diff --git a/pkg/daemon/ceph/client/command.go b/pkg/daemon/ceph/client/command.go index 4e4046a67265..88646c486563 100644 --- a/pkg/daemon/ceph/client/command.go +++ b/pkg/daemon/ceph/client/command.go @@ -66,7 +66,7 @@ func FinalizeCephCommandArgs(command string, clusterInfo *ClusterInfo, args []st // we could use a slice and iterate over it but since we have only 3 elements // I don't think this is worth a loop - timeout := strconv.Itoa(int(exec.CephCommandTimeout.Seconds())) + timeout := strconv.Itoa(int(exec.CephCommandsTimeout.Seconds())) if command != "rbd" && command != "crushtool" && command != "radosgw-admin" { args = append(args, "--connect-timeout="+timeout) } @@ -126,7 +126,22 @@ func NewRBDCommand(context *clusterd.Context, clusterInfo *ClusterInfo, args []s } func (c *CephToolCommand) run() ([]byte, error) { - command, args := FinalizeCephCommandArgs(c.tool, c.clusterInfo, c.args, c.context.ConfigDir) + // Initialize the command and args + command := c.tool + args := c.args + + // If this is a remote execution, we don't want to build the full set of args. For instance all + // these args are not needed since those paths don't exist inside the cmd-proxy container: + // --cluster=openshift-storage + // --conf=/var/lib/rook/openshift-storage/openshift-storage.config + // --name=client.admin + // --keyring=/var/lib/rook/openshift-storage/client.admin.keyring + // + // The cmd-proxy container will take care of the rest with the help of the env CEPH_ARGS + if !c.RemoteExecution { + command, args = FinalizeCephCommandArgs(c.tool, c.clusterInfo, c.args, c.context.ConfigDir) + } + if c.JsonOutput { args = append(args, "--format", "json") } else { @@ -144,7 +159,9 @@ func (c *CephToolCommand) run() ([]byte, error) { if command == RBDTool { if c.RemoteExecution { output, stderr, err = c.context.RemoteExecutor.ExecCommandInContainerWithFullOutputWithTimeout(ProxyAppLabel, CommandProxyInitContainerName, c.clusterInfo.Namespace, append([]string{command}, args...)...) - output = fmt.Sprintf("%s.%s", output, stderr) + if stderr != "" || err != nil { + err = errors.Errorf("%s. %s", err.Error(), stderr) + } } else if c.timeout == 0 { output, err = c.context.Executor.ExecuteCommandWithOutput(command, args...) } else { @@ -174,7 +191,7 @@ func (c *CephToolCommand) RunWithTimeout(timeout time.Duration) ([]byte, error) // configured its arguments. It is future work to integrate this case into the // generalization. func ExecuteRBDCommandWithTimeout(context *clusterd.Context, args []string) (string, error) { - output, err := context.Executor.ExecuteCommandWithTimeout(exec.CephCommandTimeout, RBDTool, args...) + output, err := context.Executor.ExecuteCommandWithTimeout(exec.CephCommandsTimeout, RBDTool, args...) 
return output, err } diff --git a/pkg/daemon/ceph/client/command_test.go b/pkg/daemon/ceph/client/command_test.go index ffa6744b4530..85f69c35e257 100644 --- a/pkg/daemon/ceph/client/command_test.go +++ b/pkg/daemon/ceph/client/command_test.go @@ -19,6 +19,7 @@ package client import ( "strconv" "testing" + "time" "github.com/pkg/errors" "github.com/rook/rook/pkg/clusterd" @@ -35,7 +36,7 @@ func TestFinalizeCephCommandArgs(t *testing.T) { args := []string{"quorum_status"} expectedArgs := []string{ "quorum_status", - "--connect-timeout=" + strconv.Itoa(int(exec.CephCommandTimeout.Seconds())), + "--connect-timeout=" + strconv.Itoa(int(exec.CephCommandsTimeout.Seconds())), "--cluster=rook", "--conf=/var/lib/rook/rook-ceph/rook/rook.config", "--name=client.admin", @@ -98,6 +99,7 @@ func TestFinalizeCephCommandArgsToolBox(t *testing.T) { } clusterInfo := AdminClusterInfo("rook") + exec.CephCommandsTimeout = 15 * time.Second cmd, args := FinalizeCephCommandArgs(expectedCommand, clusterInfo, args, configDir) assert.Exactly(t, "kubectl", cmd) assert.Exactly(t, expectedArgs, args) @@ -113,6 +115,7 @@ func TestNewRBDCommand(t *testing.T) { executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { switch { case command == "rbd" && args[0] == "create": + assert.Len(t, args, 8) return "success", nil } return "", errors.Errorf("unexpected ceph command %q", args) @@ -134,8 +137,9 @@ func TestNewRBDCommand(t *testing.T) { assert.True(t, cmd.RemoteExecution) _, err := cmd.Run() assert.Error(t, err) + assert.Len(t, cmd.args, 4) // This is not the best but it shows we go through the right codepath - assert.EqualError(t, err, "no pods found with selector \"rook-ceph-mgr\"") + assert.Contains(t, err.Error(), "no pods found with selector \"rook-ceph-mgr\"") }) } diff --git a/pkg/daemon/ceph/client/config.go b/pkg/daemon/ceph/client/config.go index 31876d1e9188..09d03b815e49 100644 --- a/pkg/daemon/ceph/client/config.go +++ b/pkg/daemon/ceph/client/config.go @@ -115,7 +115,7 @@ func generateConfigFile(context *clusterd.Context, clusterInfo *ClusterInfo, pat // create the config directory if err := os.MkdirAll(pathRoot, 0744); err != nil { - logger.Warningf("failed to create config directory at %q. 
%v", pathRoot, err) + return "", errors.Wrapf(err, "failed to create config directory at %q", pathRoot) } configFile, err := createGlobalConfigFileSection(context, clusterInfo, globalConfig) @@ -308,31 +308,3 @@ func WriteCephConfig(context *clusterd.Context, clusterInfo *ClusterInfo) error } return nil } - -// SetConfig applies a setting for a single mgr daemon -func SetConfig(context *clusterd.Context, clusterInfo *ClusterInfo, daemonID string, key, val string, force bool) (bool, error) { - var getArgs, setArgs []string - getArgs = append(getArgs, "config", "get", daemonID, key) - if val == "" { - setArgs = append(setArgs, "config", "rm", daemonID, key) - } else { - setArgs = append(setArgs, "config", "set", daemonID, key, val) - } - if force { - setArgs = append(setArgs, "--force") - } - - // Retrieve previous value to monitor changes - var prevVal string - buf, err := NewCephCommand(context, clusterInfo, getArgs).Run() - if err == nil { - prevVal = strings.TrimSpace(string(buf)) - } - - if _, err := NewCephCommand(context, clusterInfo, setArgs).Run(); err != nil { - return false, errors.Wrapf(err, "failed to set config key %s to %q", key, val) - } - - hasChanged := prevVal != val - return hasChanged, nil -} diff --git a/pkg/daemon/ceph/client/filesystem.go b/pkg/daemon/ceph/client/filesystem.go index 9917181af3a1..08b3e707cbb6 100644 --- a/pkg/daemon/ceph/client/filesystem.go +++ b/pkg/daemon/ceph/client/filesystem.go @@ -284,8 +284,7 @@ func FailMDS(context *clusterd.Context, clusterInfo *ClusterInfo, gid int) error } // FailFilesystem efficiently brings down the filesystem by marking the filesystem as down -// and failing the MDSes using a single Ceph command. This works only from nautilus version -// of Ceph onwards. +// and failing the MDSes using a single Ceph command. func FailFilesystem(context *clusterd.Context, clusterInfo *ClusterInfo, fsName string) error { args := []string{"fs", "fail", fsName} _, err := NewCephCommand(context, clusterInfo, args).Run() diff --git a/pkg/daemon/ceph/client/filesystem_mirror.go b/pkg/daemon/ceph/client/filesystem_mirror.go index 1d6ba98d54d4..dfe6151cc01f 100644 --- a/pkg/daemon/ceph/client/filesystem_mirror.go +++ b/pkg/daemon/ceph/client/filesystem_mirror.go @@ -79,6 +79,10 @@ func DisableFilesystemSnapshotMirror(context *clusterd.Context, clusterInfo *Clu // Run command output, err := cmd.Run() if err != nil { + if code, err := exec.ExtractExitCode(err); err == nil && code == int(syscall.ENOTSUP) { + logger.Debug("filesystem mirroring is not enabled, nothing to disable") + return nil + } return errors.Wrapf(err, "failed to disable ceph filesystem snapshot mirror for filesystem %q. %s", filesystem, output) } diff --git a/pkg/daemon/ceph/client/info.go b/pkg/daemon/ceph/client/info.go index bd658c358d8c..6c5ab5f529c1 100644 --- a/pkg/daemon/ceph/client/info.go +++ b/pkg/daemon/ceph/client/info.go @@ -78,7 +78,7 @@ func (c *ClusterInfo) NamespacedName() types.NamespacedName { } // AdminClusterInfo() creates a ClusterInfo with the basic info to access the cluster -// as an admin. Only the namespace and the ceph username fields are set in the struct, +// as an admin. Only a few fields are set in the struct, // so this clusterInfo cannot be used to generate the mon config or request the // namespacedName. A full cluster info must be populated for those operations. 
func AdminClusterInfo(namespace string) *ClusterInfo { diff --git a/pkg/daemon/ceph/client/mirror.go b/pkg/daemon/ceph/client/mirror.go index 2d84ab8b09b9..e992d6622d50 100644 --- a/pkg/daemon/ceph/client/mirror.go +++ b/pkg/daemon/ceph/client/mirror.go @@ -17,37 +17,59 @@ limitations under the License. package client import ( + "encoding/base64" "encoding/json" "fmt" "io/ioutil" "os" + "strings" "github.com/pkg/errors" cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" "github.com/rook/rook/pkg/clusterd" + "k8s.io/apimachinery/pkg/util/sets" +) + +// PeerToken is the content of the peer token +type PeerToken struct { + ClusterFSID string `json:"fsid"` + ClientID string `json:"client_id"` + Key string `json:"key"` + MonHost string `json:"mon_host"` + // These fields are added by Rook and NOT part of the output of client.CreateRBDMirrorBootstrapPeer() + Namespace string `json:"namespace"` +} + +var ( + rbdMirrorPeerCaps = []string{"mon", "profile rbd-mirror-peer", "osd", "profile rbd"} + rbdMirrorPeerKeyringID = "rbd-mirror-peer" ) // ImportRBDMirrorBootstrapPeer add a mirror peer in the rbd-mirror configuration -func ImportRBDMirrorBootstrapPeer(context *clusterd.Context, clusterInfo *ClusterInfo, poolName, direction string, token []byte) error { +func ImportRBDMirrorBootstrapPeer(context *clusterd.Context, clusterInfo *ClusterInfo, poolName string, direction string, token []byte) error { logger.Infof("add rbd-mirror bootstrap peer token for pool %q", poolName) // Token file - tokenFilePath := fmt.Sprintf("/tmp/rbd-mirror-token-%s", poolName) + tokenFilePattern := fmt.Sprintf("rbd-mirror-token-%s", poolName) + tokenFilePath, err := ioutil.TempFile("/tmp", tokenFilePattern) + if err != nil { + return errors.Wrapf(err, "failed to create temporary token file for pool %q", poolName) + } // Write token into a file - err := ioutil.WriteFile(tokenFilePath, token, 0400) + err = ioutil.WriteFile(tokenFilePath.Name(), token, 0400) if err != nil { - return errors.Wrapf(err, "failed to write token to file %q", tokenFilePath) + return errors.Wrapf(err, "failed to write token to file %q", tokenFilePath.Name()) } // Remove token once we exit, we don't need it anymore defer func() error { - err := os.Remove(tokenFilePath) + err := os.Remove(tokenFilePath.Name()) return err }() //nolint // we don't want to return here // Build command - args := []string{"mirror", "pool", "peer", "bootstrap", "import", poolName, tokenFilePath} + args := []string{"mirror", "pool", "peer", "bootstrap", "import", poolName, tokenFilePath.Name()} if direction != "" { args = append(args, "--direction", direction) } @@ -59,6 +81,7 @@ func ImportRBDMirrorBootstrapPeer(context *clusterd.Context, clusterInfo *Cluste return errors.Wrapf(err, "failed to add rbd-mirror peer token for pool %q. 
%s", poolName, output) } + logger.Infof("successfully added rbd-mirror peer token for pool %q", poolName) return nil } @@ -306,3 +329,58 @@ func ListSnapshotSchedulesRecursively(context *clusterd.Context, clusterInfo *Cl logger.Debugf("successfully recursively listed snapshot schedules for pool %q", poolName) return snapshotSchedulesRecursive, nil } + +/* CreateRBDMirrorBootstrapPeerWithoutPool creates a bootstrap peer for the current cluster +It creates the cephx user for the remote cluster to use with all the necessary details +This function is handy on scenarios where no pools have been created yet but replication communication is required (connecting peers) +It essentially sits above CreateRBDMirrorBootstrapPeer() +and is a cluster-wide option in the scenario where all the pools will be mirrored to the same remote cluster + +So the scenario looks like: + + 1) Create the cephx ID on the source cluster + + 2) Enable a source pool for mirroring - at any time, we just don't know when + rbd --cluster site-a mirror pool enable image-pool image + + 3) Copy the key details over to the other cluster (non-ceph workflow) + + 4) Enable destination pool for mirroring + rbd --cluster site-b mirror pool enable image-pool image + + 5) Add the peer details to the destination pool + + 6) Repeat the steps flipping source and destination to enable + bi-directional mirroring +*/ +func CreateRBDMirrorBootstrapPeerWithoutPool(context *clusterd.Context, clusterInfo *ClusterInfo) ([]byte, error) { + fullClientName := getQualifiedUser(rbdMirrorPeerKeyringID) + logger.Infof("create rbd-mirror bootstrap peer token %q", fullClientName) + key, err := AuthGetOrCreateKey(context, clusterInfo, fullClientName, rbdMirrorPeerCaps) + if err != nil { + return nil, errors.Wrapf(err, "failed to create rbd-mirror peer key %q", fullClientName) + } + logger.Infof("successfully created rbd-mirror bootstrap peer token for cluster %q", clusterInfo.NamespacedName().Name) + + mons := sets.NewString() + for _, mon := range clusterInfo.Monitors { + mons.Insert(mon.Endpoint) + } + + peerToken := PeerToken{ + ClusterFSID: clusterInfo.FSID, + ClientID: rbdMirrorPeerKeyringID, + Key: key, + MonHost: strings.Join(mons.List(), ","), + Namespace: clusterInfo.Namespace, + } + + // Marshal the Go type back to JSON + decodedTokenBackToJSON, err := json.Marshal(peerToken) + if err != nil { + return nil, errors.Wrap(err, "failed to encode peer token to json") + } + + // Return the base64 encoded token + return []byte(base64.StdEncoding.EncodeToString(decodedTokenBackToJSON)), nil +} diff --git a/pkg/daemon/ceph/client/mirror_test.go b/pkg/daemon/ceph/client/mirror_test.go index cfa4f09dafe0..b4220e7d7522 100644 --- a/pkg/daemon/ceph/client/mirror_test.go +++ b/pkg/daemon/ceph/client/mirror_test.go @@ -107,7 +107,6 @@ func TestImportRBDMirrorBootstrapPeer(t *testing.T) { assert.Equal(t, "bootstrap", args[3]) assert.Equal(t, "import", args[4]) assert.Equal(t, pool, args[5]) - assert.Equal(t, "/tmp/rbd-mirror-token-pool-test", args[6]) assert.Equal(t, 11, len(args)) return mirrorStatus, nil } @@ -125,7 +124,6 @@ func TestImportRBDMirrorBootstrapPeer(t *testing.T) { assert.Equal(t, "bootstrap", args[3]) assert.Equal(t, "import", args[4]) assert.Equal(t, pool, args[5]) - assert.Equal(t, "/tmp/rbd-mirror-token-pool-test", args[6]) assert.Equal(t, "--direction", args[7]) assert.Equal(t, "rx-tx", args[8]) assert.Equal(t, 13, len(args)) diff --git a/pkg/daemon/ceph/client/mon.go b/pkg/daemon/ceph/client/mon.go index dec00f07b13d..08cfc92a8082 100644 --- 
a/pkg/daemon/ceph/client/mon.go +++ b/pkg/daemon/ceph/client/mon.go @@ -152,3 +152,14 @@ func SetMonStretchTiebreaker(context *clusterd.Context, clusterInfo *ClusterInfo logger.Infof("successfully set mon tiebreaker %q in failure domain %q", monName, bucketType) return nil } + +// SetNewTiebreaker sets the new tiebreaker mon in the stretch cluster during a failover +func SetNewTiebreaker(context *clusterd.Context, clusterInfo *ClusterInfo, monName string) error { + logger.Infof("setting new mon tiebreaker %q in arbiter zone", monName) + args := []string{"mon", "set_new_tiebreaker", monName} + if _, err := NewCephCommand(context, clusterInfo, args).Run(); err != nil { + return errors.Wrapf(err, "failed to set new mon tiebreaker %q", monName) + } + logger.Infof("successfully set new mon tiebreaker %q in arbiter zone", monName) + return nil +} diff --git a/pkg/daemon/ceph/client/mon_test.go b/pkg/daemon/ceph/client/mon_test.go index 448098446c49..9c3a8c4414a7 100644 --- a/pkg/daemon/ceph/client/mon_test.go +++ b/pkg/daemon/ceph/client/mon_test.go @@ -18,9 +18,11 @@ package client import ( "fmt" "testing" + "time" "github.com/pkg/errors" "github.com/rook/rook/pkg/clusterd" + "github.com/rook/rook/pkg/util/exec" exectest "github.com/rook/rook/pkg/util/exec/test" "github.com/stretchr/testify/assert" ) @@ -29,6 +31,7 @@ func TestCephArgs(t *testing.T) { // cluster a under /etc args := []string{} clusterInfo := AdminClusterInfo("a") + exec.CephCommandsTimeout = 15 * time.Second command, args := FinalizeCephCommandArgs(CephTool, clusterInfo, args, "/etc") assert.Equal(t, CephTool, command) assert.Equal(t, 5, len(args)) @@ -85,15 +88,22 @@ func TestStretchElectionStrategy(t *testing.T) { func TestStretchClusterMonTiebreaker(t *testing.T) { monName := "a" failureDomain := "rack" + setTiebreaker := false + enabledStretch := false executor := &exectest.MockExecutor{} executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { logger.Infof("Command: %s %v", command, args) switch { case args[0] == "mon" && args[1] == "enable_stretch_mode": + enabledStretch = true assert.Equal(t, monName, args[2]) assert.Equal(t, defaultStretchCrushRuleName, args[3]) assert.Equal(t, failureDomain, args[4]) return "", nil + case args[0] == "mon" && args[1] == "set_new_tiebreaker": + setTiebreaker = true + assert.Equal(t, monName, args[2]) + return "", nil } return "", errors.Errorf("unexpected ceph command %q", args) } @@ -102,6 +112,14 @@ func TestStretchClusterMonTiebreaker(t *testing.T) { err := SetMonStretchTiebreaker(context, clusterInfo, monName, failureDomain) assert.NoError(t, err) + assert.True(t, enabledStretch) + assert.False(t, setTiebreaker) + enabledStretch = false + + err = SetNewTiebreaker(context, clusterInfo, monName) + assert.NoError(t, err) + assert.True(t, setTiebreaker) + assert.False(t, enabledStretch) } func TestMonDump(t *testing.T) { diff --git a/pkg/daemon/ceph/client/pool.go b/pkg/daemon/ceph/client/pool.go index e1500b0d0658..421f2e2a5a1c 100644 --- a/pkg/daemon/ceph/client/pool.go +++ b/pkg/daemon/ceph/client/pool.go @@ -34,7 +34,7 @@ const ( confirmFlag = "--yes-i-really-mean-it" reallyConfirmFlag = "--yes-i-really-really-mean-it" targetSizeRatioProperty = "target_size_ratio" - compressionModeProperty = "compression_mode" + CompressionModeProperty = "compression_mode" PgAutoscaleModeProperty = "pg_autoscale_mode" PgAutoscaleModeOn = "on" ) @@ -124,6 +124,11 @@ func GetPoolDetails(context *clusterd.Context, clusterInfo *ClusterInfo, name st return 
CephStoragePoolDetails{}, errors.Wrapf(err, "failed to get pool %s details. %s", name, string(output)) } + return ParsePoolDetails(output) +} + +func ParsePoolDetails(in []byte) (CephStoragePoolDetails, error) { + // The response for osd pool get when passing var=all is actually malformed JSON similar to: // {"pool":"rbd","size":1}{"pool":"rbd","min_size":2}... // Note the multiple top level entities, one for each property returned. To workaround this, @@ -132,7 +137,7 @@ func GetPoolDetails(context *clusterd.Context, clusterInfo *ClusterInfo, name st // Since previously set fields remain intact if they are not overwritten, the result is the JSON // unmarshalling of all properties in the response. var poolDetails CephStoragePoolDetails - poolDetailsUnits := strings.Split(string(output), "}{") + poolDetailsUnits := strings.Split(string(in), "}{") for i := range poolDetailsUnits { pdu := poolDetailsUnits[i] if !strings.HasPrefix(pdu, "{") { @@ -143,7 +148,7 @@ func GetPoolDetails(context *clusterd.Context, clusterInfo *ClusterInfo, name st } err := json.Unmarshal([]byte(pdu), &poolDetails) if err != nil { - return CephStoragePoolDetails{}, errors.Wrapf(err, "unmarshal failed raw buffer response %s", string(output)) + return CephStoragePoolDetails{}, errors.Wrapf(err, "unmarshal failed raw buffer response %s", string(in)) } } @@ -247,7 +252,7 @@ func setCommonPoolProperties(context *clusterd.Context, clusterInfo *ClusterInfo } if pool.IsCompressionEnabled() { - pool.Parameters[compressionModeProperty] = pool.CompressionMode + pool.Parameters[CompressionModeProperty] = pool.CompressionMode } // Apply properties @@ -403,8 +408,11 @@ func CreateReplicatedPoolForApp(context *clusterd.Context, clusterInfo *ClusterI if !clusterSpec.IsStretchCluster() { // the pool is type replicated, set the size for the pool now that it's been created - if err := SetPoolReplicatedSizeProperty(context, clusterInfo, poolName, strconv.FormatUint(uint64(pool.Replicated.Size), 10)); err != nil { - return errors.Wrapf(err, "failed to set size property to replicated pool %q to %d", poolName, pool.Replicated.Size) + // Only set the size if not 0, otherwise ceph will fail to set size to 0 + if pool.Replicated.Size > 0 { + if err := SetPoolReplicatedSizeProperty(context, clusterInfo, poolName, strconv.FormatUint(uint64(pool.Replicated.Size), 10)); err != nil { + return errors.Wrapf(err, "failed to set size property to replicated pool %q to %d", poolName, pool.Replicated.Size) + } } } diff --git a/pkg/daemon/ceph/client/test/info.go b/pkg/daemon/ceph/client/test/info.go index efb03c956ba1..6efd21917de3 100644 --- a/pkg/daemon/ceph/client/test/info.go +++ b/pkg/daemon/ceph/client/test/info.go @@ -62,5 +62,6 @@ func CreateTestClusterInfo(monCount int) *client.ClusterInfo { Endpoint: fmt.Sprintf("1.2.3.%d:6789", (i + 1)), } } + c.SetName(c.Namespace) return c } diff --git a/pkg/daemon/ceph/client/upgrade.go b/pkg/daemon/ceph/client/upgrade.go index 8128018c2f0a..5fee97e7b1dd 100644 --- a/pkg/daemon/ceph/client/upgrade.go +++ b/pkg/daemon/ceph/client/upgrade.go @@ -37,7 +37,8 @@ const ( var ( // we don't perform any checks on these daemons // they don't have any "ok-to-stop" command implemented - daemonNoCheck = []string{"mgr", "rgw", "rbd-mirror", "nfs", "fs-mirror"} + daemonNoCheck = []string{"mgr", "rgw", "rbd-mirror", "nfs", "fs-mirror"} + errNoHostInCRUSH = errors.New("no host in crush map yet?") ) func getCephMonVersionString(context *clusterd.Context, clusterInfo *ClusterInfo) (string, error) { @@ -236,12 +237,12 @@ func 
StringInSlice(a string, list []string) bool { // Assume the following: // // "mon": { -// "ceph version 13.2.5 (cbff874f9007f1869bfd3821b7e33b2a6ffd4988) mimic (stable)": 1, -// "ceph version 14.2.0 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) nautilus (stable)": 2 +// "ceph version 16.2.5 (cbff874f9007f1869bfd3821b7e33b2a6ffd4988) pacific (stable)": 2, +// "ceph version 17.2.0 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) quincy (stable)": 1 // } // -// In the case we will pick: "ceph version 13.2.5 (cbff874f9007f1869bfd3821b7e33b2a6ffd4988) mimic (stable)": 1, -// And eventually return 13.2.5 +// In this case we will pick: "ceph version 16.2.5 (cbff874f9007f1869bfd3821b7e33b2a6ffd4988) pacific (stable)": 2, +// And eventually return 16.2.5 func LeastUptodateDaemonVersion(context *clusterd.Context, clusterInfo *ClusterInfo, daemonType string) (cephver.CephVersion, error) { var r map[string]int var vv cephver.CephVersion @@ -311,7 +312,7 @@ func allOSDsSameHost(context *clusterd.Context, clusterInfo *ClusterInfo) (bool, hostOsdNodes := len(hostOsdTree.Nodes) if hostOsdNodes == 0 { - return false, errors.New("no host in crush map yet?") + return false, errNoHostInCRUSH } // If the number of OSD node is 1, chances are this is simple setup with all OSDs on it @@ -369,6 +370,10 @@ func OSDUpdateShouldCheckOkToStop(context *clusterd.Context, clusterInfo *Cluste // aio means all in one aio, err := allOSDsSameHost(context, clusterInfo) if err != nil { + if errors.Is(err, errNoHostInCRUSH) { + logger.Warning("the CRUSH map has no 'host' entries so not performing ok-to-stop checks") + return false + } logger.Warningf("failed to determine if all osds are running on the same host. will check if OSDs are ok-to-stop. if all OSDs are running on one host %s. %v", userIntervention, err) return true } @@ -401,6 +406,18 @@ func osdDoNothing(context *clusterd.Context, clusterInfo *ClusterInfo) bool { // aio means all in one aio, err := allOSDsSameHost(context, clusterInfo) if err != nil { + // We return true so that we can continue without a retry and subsequently skip checking whether the + // OSD can be stopped. This handles the scenario where the OSDs have been created but not yet + // started due to a wrong CR configuration. For instance, when OSDs are encrypted and Vault + // is used to store encryption keys, if the KV version is incorrect during the cluster + // initialization the OSDs will fail to start and stay in CLBO until the CR is updated again + // with the correct KV version so that they can start. In this scenario we don't need to go + // through the ok-to-stop check at all, since it would always + // fail and make us wait for nothing + if errors.Is(err, errNoHostInCRUSH) { + logger.Warning("the CRUSH map has no 'host' entries so not performing ok-to-stop checks") + return true + } logger.Warningf("failed to determine if all osds are running on the same host, performing upgrade check anyways.
%v", err) return false } diff --git a/pkg/daemon/ceph/client/upgrade_test.go b/pkg/daemon/ceph/client/upgrade_test.go index 6c3c297811b9..2035d84bb8a3 100644 --- a/pkg/daemon/ceph/client/upgrade_test.go +++ b/pkg/daemon/ceph/client/upgrade_test.go @@ -141,8 +141,8 @@ func TestDaemonMapEntry(t *testing.T) { dummyVersionsRaw := []byte(` { "mon": { - "ceph version 13.2.5 (cbff874f9007f1869bfd3821b7e33b2a6ffd4988) mimic (stable)": 1, - "ceph version 14.2.0 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) nautilus (stable)": 2 + "ceph version 16.2.5 (cbff874f9007f1869bfd3821b7e33b2a6ffd4988) pacific (stable)": 1, + "ceph version 17.2.0 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) quincy (stable)": 2 } }`) @@ -357,4 +357,11 @@ func TestOSDUpdateShouldCheckOkToStop(t *testing.T) { treeOutput = fake.OsdTreeOutput(0, 0) assert.False(t, OSDUpdateShouldCheckOkToStop(context, clusterInfo)) }) + + // degraded case, OSDs are failing to start so they haven't registered in the CRUSH map yet + t.Run("0 nodes with down OSDs", func(t *testing.T) { + lsOutput = fake.OsdLsOutput(3) + treeOutput = fake.OsdTreeOutput(0, 1) + assert.False(t, OSDUpdateShouldCheckOkToStop(context, clusterInfo)) + }) } diff --git a/pkg/daemon/ceph/osd/daemon.go b/pkg/daemon/ceph/osd/daemon.go index 3eeabea345b3..b158fc7d72b8 100644 --- a/pkg/daemon/ceph/osd/daemon.go +++ b/pkg/daemon/ceph/osd/daemon.go @@ -180,7 +180,7 @@ func Provision(context *clusterd.Context, agent *OsdAgent, crushLocation, topolo // set the initial orchestration status status := oposd.OrchestrationStatus{Status: oposd.OrchestrationStatusOrchestrating} - oposd.UpdateNodeStatus(agent.kv, agent.nodeName, status) + oposd.UpdateNodeOrPVCStatus(agent.kv, agent.nodeName, status) if err := client.WriteCephConfig(context, agent.clusterInfo); err != nil { return errors.Wrap(err, "failed to generate ceph config") @@ -221,7 +221,7 @@ func Provision(context *clusterd.Context, agent *OsdAgent, crushLocation, topolo // orchestration is about to start, update the status status = oposd.OrchestrationStatus{Status: oposd.OrchestrationStatusOrchestrating, PvcBackedOSD: agent.pvcBacked} - oposd.UpdateNodeStatus(agent.kv, agent.nodeName, status) + oposd.UpdateNodeOrPVCStatus(agent.kv, agent.nodeName, status) // start the desired OSDs on devices logger.Infof("configuring osd devices: %+v", devices) @@ -238,7 +238,7 @@ func Provision(context *clusterd.Context, agent *OsdAgent, crushLocation, topolo if len(deviceOSDs) == 0 { logger.Warningf("skipping OSD configuration as no devices matched the storage settings for this node %q", agent.nodeName) status = oposd.OrchestrationStatus{OSDs: deviceOSDs, Status: oposd.OrchestrationStatusCompleted, PvcBackedOSD: agent.pvcBacked} - oposd.UpdateNodeStatus(agent.kv, agent.nodeName, status) + oposd.UpdateNodeOrPVCStatus(agent.kv, agent.nodeName, status) return nil } @@ -278,7 +278,7 @@ func Provision(context *clusterd.Context, agent *OsdAgent, crushLocation, topolo // orchestration is completed, update the status status = oposd.OrchestrationStatus{OSDs: deviceOSDs, Status: oposd.OrchestrationStatusCompleted, PvcBackedOSD: agent.pvcBacked} - oposd.UpdateNodeStatus(agent.kv, agent.nodeName, status) + oposd.UpdateNodeOrPVCStatus(agent.kv, agent.nodeName, status) return nil } diff --git a/pkg/daemon/ceph/osd/kms/kms.go b/pkg/daemon/ceph/osd/kms/kms.go index 8730383c934d..18e18b6d6521 100644 --- a/pkg/daemon/ceph/osd/kms/kms.go +++ b/pkg/daemon/ceph/osd/kms/kms.go @@ -24,6 +24,7 @@ import ( "github.com/coreos/pkg/capnslog" "github.com/hashicorp/vault/api" 
"github.com/libopenstorage/secrets" + "github.com/libopenstorage/secrets/vault" "github.com/pkg/errors" cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" "github.com/rook/rook/pkg/clusterd" @@ -148,13 +149,39 @@ func GetParam(kmsConfig map[string]string, param string) string { } // ValidateConnectionDetails validates mandatory KMS connection details -func ValidateConnectionDetails(clusterdContext *clusterd.Context, securitySpec cephv1.SecuritySpec, ns string) error { +func ValidateConnectionDetails(clusterdContext *clusterd.Context, securitySpec *cephv1.SecuritySpec, ns string) error { ctx := context.TODO() // A token must be specified if !securitySpec.KeyManagementService.IsTokenAuthEnabled() { return errors.New("failed to validate kms configuration (missing token in spec)") } + // KMS provider must be specified + provider := GetParam(securitySpec.KeyManagementService.ConnectionDetails, Provider) + + // Validate potential token Secret presence + if securitySpec.KeyManagementService.IsTokenAuthEnabled() { + kmsToken, err := clusterdContext.Clientset.CoreV1().Secrets(ns).Get(ctx, securitySpec.KeyManagementService.TokenSecretName, metav1.GetOptions{}) + if err != nil { + return errors.Wrapf(err, "failed to fetch kms token secret %q", securitySpec.KeyManagementService.TokenSecretName) + } + + // Check for empty token + token, ok := kmsToken.Data[KMSTokenSecretNameKey] + if !ok || len(token) == 0 { + return errors.Errorf("failed to read k8s kms secret %q key %q (not found or empty)", KMSTokenSecretNameKey, securitySpec.KeyManagementService.TokenSecretName) + } + + switch provider { + case "vault": + // Set the env variable + err = os.Setenv(api.EnvVaultToken, string(token)) + if err != nil { + return errors.Wrap(err, "failed to set vault kms token to an env var") + } + } + } + // Lookup mandatory connection details for _, config := range kmsMandatoryConnectionDetails { if GetParam(securitySpec.KeyManagementService.ConnectionDetails, config) == "" { @@ -163,26 +190,27 @@ func ValidateConnectionDetails(clusterdContext *clusterd.Context, securitySpec c } // Validate KMS provider connection details - switch GetParam(securitySpec.KeyManagementService.ConnectionDetails, Provider) { + switch provider { case "vault": err := validateVaultConnectionDetails(clusterdContext, ns, securitySpec.KeyManagementService.ConnectionDetails) if err != nil { return errors.Wrap(err, "failed to validate vault connection details") } - } - // Validate potential token Secret presence - if securitySpec.KeyManagementService.IsTokenAuthEnabled() { - kmsToken, err := clusterdContext.Clientset.CoreV1().Secrets(ns).Get(ctx, securitySpec.KeyManagementService.TokenSecretName, metav1.GetOptions{}) - if err != nil { - return errors.Wrapf(err, "failed to fetch kms token secret %q", securitySpec.KeyManagementService.TokenSecretName) - } - - // Check for empty token - token, ok := kmsToken.Data[KMSTokenSecretNameKey] - if !ok || len(token) == 0 { - return errors.Errorf("failed to read k8s kms secret %q key %q (not found or empty)", KMSTokenSecretNameKey, securitySpec.KeyManagementService.TokenSecretName) + secretEngine := securitySpec.KeyManagementService.ConnectionDetails[VaultSecretEngineKey] + switch secretEngine { + case VaultKVSecretEngineKey: + // Append Backend Version if not already present + if GetParam(securitySpec.KeyManagementService.ConnectionDetails, vault.VaultBackendKey) == "" { + backendVersion, err := BackendVersion(clusterdContext, ns, securitySpec.KeyManagementService.ConnectionDetails) + if err != nil { + 
return errors.Wrap(err, "failed to get backend version") + } + securitySpec.KeyManagementService.ConnectionDetails[vault.VaultBackendKey] = backendVersion + } } + default: + return errors.Errorf("failed to validate kms provider connection details (provider %q not supported)", provider) } return nil diff --git a/pkg/daemon/ceph/osd/kms/kms_test.go b/pkg/daemon/ceph/osd/kms/kms_test.go index 20202b7cd1ad..b84a0dbd79d6 100644 --- a/pkg/daemon/ceph/osd/kms/kms_test.go +++ b/pkg/daemon/ceph/osd/kms/kms_test.go @@ -21,6 +21,8 @@ import ( "os" "testing" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/vault" cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" "github.com/rook/rook/pkg/clusterd" "github.com/rook/rook/pkg/operator/test" @@ -33,7 +35,7 @@ func TestValidateConnectionDetails(t *testing.T) { ctx := context.TODO() // Placeholder context := &clusterd.Context{Clientset: test.New(t, 3)} - securitySpec := cephv1.SecuritySpec{KeyManagementService: cephv1.KeyManagementServiceSpec{ConnectionDetails: map[string]string{}}} + securitySpec := &cephv1.SecuritySpec{KeyManagementService: cephv1.KeyManagementServiceSpec{ConnectionDetails: map[string]string{}}} ns := "rook-ceph" // Error: no token in spec @@ -43,20 +45,6 @@ func TestValidateConnectionDetails(t *testing.T) { securitySpec.KeyManagementService.TokenSecretName = "vault-token" - // Error: Data is present but no provider - securitySpec.KeyManagementService.ConnectionDetails = map[string]string{"foo": "bar"} - err = ValidateConnectionDetails(context, securitySpec, ns) - assert.Error(t, err, "") - assert.EqualError(t, err, "failed to validate kms config \"KMS_PROVIDER\". cannot be empty") - - // Error: Data has a KMS_PROVIDER but missing details - securitySpec.KeyManagementService.ConnectionDetails["KMS_PROVIDER"] = "vault" - err = ValidateConnectionDetails(context, securitySpec, ns) - assert.Error(t, err, "") - assert.EqualError(t, err, "failed to validate vault connection details: failed to find connection details \"VAULT_ADDR\"") - - // Error: connection details are correct but the token secret does not exist - securitySpec.KeyManagementService.ConnectionDetails["VAULT_ADDR"] = "https://1.1.1.1:8200" err = ValidateConnectionDetails(context, securitySpec, ns) assert.Error(t, err, "") assert.EqualError(t, err, "failed to fetch kms token secret \"vault-token\": secrets \"vault-token\" not found") @@ -83,17 +71,27 @@ func TestValidateConnectionDetails(t *testing.T) { assert.EqualError(t, err, "failed to read k8s kms secret \"token\" key \"vault-token\" (not found or empty)") // Success: token content is ok - s.Data["token"] = []byte("myt-otkenbenvqrev") + s.Data["token"] = []byte("token") _, err = context.Clientset.CoreV1().Secrets(ns).Update(ctx, s, metav1.UpdateOptions{}) assert.NoError(t, err) err = ValidateConnectionDetails(context, securitySpec, ns) - assert.NoError(t, err, "") + assert.Error(t, err, "") + assert.EqualError(t, err, "failed to validate kms config \"KMS_PROVIDER\". 
cannot be empty") + securitySpec.KeyManagementService.ConnectionDetails["KMS_PROVIDER"] = "vault" + + // Error: Data has a KMS_PROVIDER but missing details + err = ValidateConnectionDetails(context, securitySpec, ns) + assert.Error(t, err, "") + assert.EqualError(t, err, "failed to validate vault connection details: failed to find connection details \"VAULT_ADDR\"") + + // Error: connection details are correct but the token secret does not exist + securitySpec.KeyManagementService.ConnectionDetails["VAULT_ADDR"] = "https://1.1.1.1:8200" // Error: TLS is configured but secrets do not exist securitySpec.KeyManagementService.ConnectionDetails["VAULT_CACERT"] = "vault-ca-secret" err = ValidateConnectionDetails(context, securitySpec, ns) assert.Error(t, err, "") - assert.EqualError(t, err, "failed to validate vault connection details: failed to find TLS connection details k8s secret \"VAULT_CACERT\"") + assert.EqualError(t, err, "failed to validate vault connection details: failed to find TLS connection details k8s secret \"vault-ca-secret\"") // Error: TLS secret exists but empty key tlsSecret := &v1.Secret{ @@ -114,6 +112,41 @@ func TestValidateConnectionDetails(t *testing.T) { assert.NoError(t, err) err = ValidateConnectionDetails(context, securitySpec, ns) assert.NoError(t, err, "") + + // test with vauult server + t.Run("success - auto detect kv version and set it", func(t *testing.T) { + cluster := fakeVaultServer(t) + cluster.Start() + defer cluster.Cleanup() + core := cluster.Cores[0].Core + vault.TestWaitActive(t, core) + client := cluster.Cores[0].Client + // Mock the client here + vaultClient = func(clusterdContext *clusterd.Context, namespace string, secretConfig map[string]string) (*api.Client, error) { + return client, nil + } + if err := client.Sys().Mount("rook/", &api.MountInput{ + Type: "kv-v2", + Options: map[string]string{"version": "2"}, + }); err != nil { + t.Fatal(err) + } + securitySpec := &cephv1.SecuritySpec{ + KeyManagementService: cephv1.KeyManagementServiceSpec{ + ConnectionDetails: map[string]string{ + "VAULT_SECRET_ENGINE": "kv", + "KMS_PROVIDER": "vault", + "VAULT_ADDR": client.Address(), + "VAULT_BACKEND_PATH": "rook", + }, + TokenSecretName: "vault-token", + }, + } + err = ValidateConnectionDetails(context, securitySpec, ns) + assert.NoError(t, err, "") + assert.Equal(t, securitySpec.KeyManagementService.ConnectionDetails["VAULT_BACKEND"], "v2") + }) + } func TestSetTokenToEnvVar(t *testing.T) { diff --git a/pkg/daemon/ceph/osd/kms/vault.go b/pkg/daemon/ceph/osd/kms/vault.go index 5948c2fe3d8b..182c55e1db40 100644 --- a/pkg/daemon/ceph/osd/kms/vault.go +++ b/pkg/daemon/ceph/osd/kms/vault.go @@ -19,6 +19,7 @@ package kms import ( "context" "io/ioutil" + "os" "strings" "github.com/hashicorp/vault/api" @@ -45,6 +46,14 @@ var ( vaultMandatoryConnectionDetails = []string{api.EnvVaultAddress} ) +// Used for unit tests mocking too as well as production code +var ( + createTmpFile = ioutil.TempFile + getRemoveCertFiles = getRemoveCertFilesFunc +) + +type removeCertFilesFunction func() + /* VAULT API INTERNAL VALUES // Refer to https://pkg.golangclub.com/github.com/hashicorp/vault/api?tab=doc#pkg-constants const EnvVaultAddress = "VAULT_ADDR" @@ -77,10 +86,11 @@ func InitVault(context *clusterd.Context, namespace string, config map[string]st } // Populate TLS config - newConfigWithTLS, err := configTLS(context, namespace, oriConfig) + newConfigWithTLS, removeCertFiles, err := configTLS(context, namespace, oriConfig) if err != nil { return nil, errors.Wrap(err, "failed 
to initialize vault tls configuration") } + defer removeCertFiles() // Populate TLS config for key, value := range newConfigWithTLS { @@ -96,8 +106,31 @@ func InitVault(context *clusterd.Context, namespace string, config map[string]st return v, nil } -func configTLS(clusterdContext *clusterd.Context, namespace string, config map[string]string) (map[string]string, error) { +// configTLS returns a map of TLS config entries that point to physical files for the TLS library to load +// It also returns a function to remove the temporary files (certs, keys) +// The signature uses named result parameters to help with building the 'defer' statement, especially for +// removeCertFiles, which must be populated with the files to remove on success and set to +// nil on error +func configTLS(clusterdContext *clusterd.Context, namespace string, config map[string]string) (newConfig map[string]string, removeCertFiles removeCertFilesFunction, retErr error) { ctx := context.TODO() + var filesToRemove []*os.File + + defer func() { + // Build the function that the caller should use to remove the temp files here; + // it is created when this function returns, based on the currently-recorded files + removeCertFiles = getRemoveCertFiles(filesToRemove) + if retErr != nil { + // If we encountered an error, remove the temp files + removeCertFiles() + + // Also reset the returned cleanup function to nil + // It's fine to use nil here since the defer in the calling functions is only + // registered after checking the error; on error that defer is never set up since we + // have returned already + removeCertFiles = nil + } + }() + for _, tlsOption := range cephv1.VaultTLSConnectionDetails { tlsSecretName := GetParam(config, tlsOption) if tlsSecretName == "" { @@ -107,31 +140,52 @@ func configTLS(clusterdContext *clusterd.Context, namespace string, config map[s if !strings.Contains(tlsSecretName, EtcVaultDir) { secret, err := clusterdContext.Clientset.CoreV1().Secrets(namespace).Get(ctx, tlsSecretName, v1.GetOptions{}) if err != nil { - return nil, errors.Wrapf(err, "failed to fetch tls k8s secret %q", tlsSecretName) + return nil, removeCertFiles, errors.Wrapf(err, "failed to fetch tls k8s secret %q", tlsSecretName) } - // Generate a temp file - file, err := ioutil.TempFile("", "") + file, err := createTmpFile("", "") if err != nil { - return nil, errors.Wrapf(err, "failed to generate temp file for k8s secret %q content", tlsSecretName) + return nil, removeCertFiles, errors.Wrapf(err, "failed to generate temp file for k8s secret %q content", tlsSecretName) } // Write into a file err = ioutil.WriteFile(file.Name(), secret.Data[tlsSecretKeyToCheck(tlsOption)], 0444) if err != nil { - return nil, errors.Wrapf(err, "failed to write k8s secret %q content to a file", tlsSecretName) + return nil, removeCertFiles, errors.Wrapf(err, "failed to write k8s secret %q content to a file", tlsSecretName) } logger.Debugf("replacing %q current content %q with %q", tlsOption, config[tlsOption], file.Name()) - // update the env var with the path + // Update the env var with the path config[tlsOption] = file.Name() + + // Add the file to the list of files to remove + filesToRemove = append(filesToRemove, file) } else { logger.Debugf("value of tlsOption %q tlsSecretName is already correct %q", tlsOption, tlsSecretName) } } - return config, nil + return config, removeCertFiles, nil +} + +func getRemoveCertFilesFunc(filesToRemove []*os.File) removeCertFilesFunction { + return removeCertFilesFunction(func() { + for _, file := range
filesToRemove { + logger.Debugf("closing %q", file.Name()) + err := file.Close() + if err != nil { + logger.Errorf("failed to close file %q. %v", file.Name(), err) + } + logger.Debugf("closed %q", file.Name()) + logger.Debugf("removing %q", file.Name()) + err = os.Remove(file.Name()) + if err != nil { + logger.Errorf("failed to remove file %q. %v", file.Name(), err) + } + logger.Debugf("removed %q", file.Name()) + } + }) } func put(v secrets.Secrets, secretName, secretValue string, keyContext map[string]string) error { @@ -183,7 +237,7 @@ func buildKeyContext(config map[string]string) map[string]string { keyContext := map[string]string{secrets.KeyVaultNamespace: config[api.EnvVaultNamespace]} vaultNamespace, ok := config[api.EnvVaultNamespace] if !ok || vaultNamespace == "" { - keyContext = nil + keyContext = map[string]string{} } return keyContext @@ -215,7 +269,7 @@ func validateVaultConnectionDetails(clusterdContext *clusterd.Context, ns string // Fetch the secret s, err := clusterdContext.Clientset.CoreV1().Secrets(ns).Get(ctx, tlsSecretName, v1.GetOptions{}) if err != nil { - return errors.Errorf("failed to find TLS connection details k8s secret %q", tlsOption) + return errors.Errorf("failed to find TLS connection details k8s secret %q", tlsSecretName) } // Check the Secret key and its content diff --git a/pkg/daemon/ceph/osd/kms/vault_api.go b/pkg/daemon/ceph/osd/kms/vault_api.go new file mode 100644 index 000000000000..9361a6c9ca68 --- /dev/null +++ b/pkg/daemon/ceph/osd/kms/vault_api.go @@ -0,0 +1,143 @@ +/* +Copyright 2021 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kms + +import ( + "os" + "strings" + + "github.com/libopenstorage/secrets/vault" + "github.com/libopenstorage/secrets/vault/utils" + "github.com/pkg/errors" + "github.com/rook/rook/pkg/clusterd" + + "github.com/hashicorp/vault/api" +) + +const ( + kvVersionKey = "version" + kvVersion1 = "kv" + kvVersion2 = "kv-v2" +) + +// vaultClient returns a vault client, also used in unit tests to mock the client +var vaultClient = newVaultClient + +// newVaultClient returns a vault client, there is no need for any secretConfig validation +// Since this is called after an already validated call InitVault() +func newVaultClient(clusterdContext *clusterd.Context, namespace string, secretConfig map[string]string) (*api.Client, error) { + // DefaultConfig uses the environment variables if present. 
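+ // (for example VAULT_ADDR and VAULT_CACERT); the address and TLS file paths derived from secretConfig are applied on top of it below.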
+ config := api.DefaultConfig() + + // Always use a new map otherwise the map will mutate and subsequent calls will fail since the + // TLS content has been altered by the TLS config in vaultClient() + localSecretConfig := make(map[string]string) + for k, v := range secretConfig { + localSecretConfig[k] = v + } + + // Convert map string to map interface + c := make(map[string]interface{}) + for k, v := range localSecretConfig { + c[k] = v + } + + // Populate TLS config + newConfigWithTLS, removeCertFiles, err := configTLS(clusterdContext, namespace, localSecretConfig) + if err != nil { + return nil, errors.Wrap(err, "failed to initialize vault tls configuration") + } + defer removeCertFiles() + + // Populate TLS config + for key, value := range newConfigWithTLS { + c[key] = string(value) + } + + // Configure TLS + if err := utils.ConfigureTLS(config, c); err != nil { + return nil, err + } + + // Initialize the vault client + client, err := api.NewClient(config) + if err != nil { + return nil, err + } + + // Set the token if provided, token should be set by ValidateConnectionDetails() if applicable + // api.NewClient() already looks up the token from the environment but we need to set it here and remove potential malformed tokens + client.SetToken(strings.TrimSuffix(os.Getenv(api.EnvVaultToken), "\n")) + + // Set Vault address, was validated by ValidateConnectionDetails() + err = client.SetAddress(strings.TrimSuffix(localSecretConfig[api.EnvVaultAddress], "\n")) + if err != nil { + return nil, err + } + + return client, nil +} + +func BackendVersion(clusterdContext *clusterd.Context, namespace string, secretConfig map[string]string) (string, error) { + v1 := "v1" + v2 := "v2" + + backendPath := GetParam(secretConfig, vault.VaultBackendPathKey) + if backendPath == "" { + backendPath = vault.DefaultBackendPath + } + + backend := GetParam(secretConfig, vault.VaultBackendKey) + switch backend { + case kvVersion1, v1: + logger.Info("vault kv secret engine version set to v1") + return v1, nil + case kvVersion2, v2: + logger.Info("vault kv secret engine version set to v2") + return v2, nil + default: + // Initialize Vault client + vaultClient, err := vaultClient(clusterdContext, namespace, secretConfig) + if err != nil { + return "", errors.Wrap(err, "failed to initialize vault client") + } + + mounts, err := vaultClient.Sys().ListMounts() + if err != nil { + return "", errors.Wrap(err, "failed to list vault system mounts") + } + + for path, mount := range mounts { + // path is represented as 'path/' + if trimSlash(path) == trimSlash(backendPath) { + version := mount.Options[kvVersionKey] + if version == "2" { + logger.Info("vault kv secret engine version auto-detected to v2") + return v2, nil + } + logger.Info("vault kv secret engine version auto-detected to v1") + return v1, nil + } + } + } + + return "", errors.Errorf("secrets engine with mount path %q not found", backendPath) +} + +func trimSlash(in string) string { + return strings.Trim(in, "/") +} diff --git a/pkg/daemon/ceph/osd/kms/vault_api_test.go b/pkg/daemon/ceph/osd/kms/vault_api_test.go new file mode 100644 index 000000000000..50863abcfa57 --- /dev/null +++ b/pkg/daemon/ceph/osd/kms/vault_api_test.go @@ -0,0 +1,201 @@ +/* +Copyright 2021 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kms + +import ( + "context" + "testing" + + kv "github.com/hashicorp/vault-plugin-secrets-kv" + "github.com/hashicorp/vault/api" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + "github.com/libopenstorage/secrets/vault/utils" + "github.com/rook/rook/pkg/clusterd" + "github.com/rook/rook/pkg/operator/test" + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestBackendVersion(t *testing.T) { + cluster := fakeVaultServer(t) + cluster.Start() + defer cluster.Cleanup() + core := cluster.Cores[0].Core + vault.TestWaitActive(t, core) + client := cluster.Cores[0].Client + + // Mock the client here + vaultClient = func(clusterdContext *clusterd.Context, namespace string, secretConfig map[string]string) (*api.Client, error) { + return client, nil + } + + // Set up the kv store + if err := client.Sys().Mount("rook/", &api.MountInput{ + Type: "kv", + Options: map[string]string{"version": "1"}, + }); err != nil { + t.Fatal(err) + } + if err := client.Sys().Mount("rookv2/", &api.MountInput{ + Type: "kv-v2", + Options: map[string]string{"version": "2"}, + }); err != nil { + t.Fatal(err) + } + + type args struct { + secretConfig map[string]string + } + tests := []struct { + name string + args args + want string + wantErr bool + }{ + {"v1 is set explicitly", args{map[string]string{"VAULT_BACKEND": "v1"}}, "v1", false}, + {"v2 is set explicitly", args{map[string]string{"VAULT_BACKEND": "v2"}}, "v2", false}, + {"v1 is set auto-discovered", args{map[string]string{"VAULT_ADDR": client.Address(), "VAULT_BACKEND_PATH": "rook"}}, "v1", false}, + {"v2 is set auto-discovered", args{map[string]string{"VAULT_ADDR": client.Address(), "VAULT_BACKEND_PATH": "rookv2"}}, "v2", false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := BackendVersion(&clusterd.Context{}, "ns", tt.args.secretConfig) + if (err != nil) != tt.wantErr { + t.Errorf("BackendVersion() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("BackendVersion() = %v, want %v", got, tt.want) + } + }) + } +} + +func fakeVaultServer(t *testing.T) *vault.TestCluster { + cluster := vault.NewTestCluster(t, &vault.CoreConfig{ + DevToken: "token", + LogicalBackends: map[string]logical.Factory{"kv": kv.Factory}, + }, + &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + NumCores: 1, + }) + + return cluster +} + +func TestTLSConfig(t *testing.T) { + ns := "rook-ceph" + ctx := context.TODO() + context := &clusterd.Context{Clientset: test.New(t, 3)} + secretConfig := map[string]string{ + "foo": "bar", + "KMS_PROVIDER": "vault", + "VAULT_ADDR": "1.1.1.1", + "VAULT_BACKEND_PATH": "vault", + "VAULT_CACERT": "vault-ca-cert", + "VAULT_CLIENT_CERT": "vault-client-cert", + "VAULT_CLIENT_KEY": "vault-client-key", + } + + // DefaultConfig uses the environment variables if present. 
+ config := api.DefaultConfig() + + // Convert map string to map interface + c := make(map[string]interface{}) + for k, v := range secretConfig { + c[k] = v + } + + sCa := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vault-ca-cert", + Namespace: ns, + }, + Data: map[string][]byte{"cert": []byte(`-----BEGIN CERTIFICATE----- +MIIBJTCB0AIJAPNFNz1CNlDOMA0GCSqGSIb3DQEBCwUAMBoxCzAJBgNVBAYTAkZS +MQswCQYDVQQIDAJGUjAeFw0yMTA5MzAwODAzNDBaFw0yNDA2MjYwODAzNDBaMBox +CzAJBgNVBAYTAkZSMQswCQYDVQQIDAJGUjBcMA0GCSqGSIb3DQEBAQUAA0sAMEgC +QQDHeZ47hVBcryl6SCghM8Zj3Q6DQzJzno1J7EjPXef5m+pIVAEylS9sQuwKtFZc +vv3qS/OVFExmMdbrvfKEIfbBAgMBAAEwDQYJKoZIhvcNAQELBQADQQAAnflLuUM3 +4Dq0v7If4cgae2mr7jj3U/lIpHVtFbF7kVjC/eqmeN1a9u0UbRHKkUr+X1mVX3rJ +BvjQDN6didwQ +-----END CERTIFICATE-----`)}, + } + + sClCert := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vault-client-cert", + Namespace: ns, + }, + Data: map[string][]byte{"cert": []byte(`-----BEGIN CERTIFICATE----- +MIIBEDCBuwIBATANBgkqhkiG9w0BAQUFADAaMQswCQYDVQQGEwJGUjELMAkGA1UE +CAwCRlIwHhcNMjEwOTMwMDgwNDA1WhcNMjQwNjI2MDgwNDA1WjANMQswCQYDVQQG +EwJGUjBcMA0GCSqGSIb3DQEBAQUAA0sAMEgCQQCpWJqKhSES3BiFkt2M82xy3tkB +plDS8DM0s/+VkqfZlVG18KbbIVDHi1lsPjjs/Aja7lWymw0ycV4KGEcqxdmNAgMB +AAEwDQYJKoZIhvcNAQEFBQADQQC5esmoTqp4uEWyC+GKbTTFp8ngMUywAtZJs4nS +wdoF3ZJJzo4ps0saP1ww5LBdeeXUURscxyaFfCFmGODaHJJn +-----END CERTIFICATE-----`)}, + } + + sClKey := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vault-client-key", + Namespace: ns, + }, + Data: map[string][]byte{"key": []byte(`-----BEGIN PRIVATE KEY----- +MIIBVgIBADANBgkqhkiG9w0BAQEFAASCAUAwggE8AgEAAkEAqViaioUhEtwYhZLd +jPNsct7ZAaZQ0vAzNLP/lZKn2ZVRtfCm2yFQx4tZbD447PwI2u5VspsNMnFeChhH +KsXZjQIDAQABAkARlCv+oxEq1wQIoZUz83TXe8CFBlGvg9Wc6+5lBWM9F7K4by7i +IB5hQ2oaTNN+1Kxzf+XRM9R7sMPP9qFEp0LhAiEA0PzsQqbvNUVEx8X16Hed6V/Z +yvL1iZeHvc2QIbGjZGkCIQDPcM7U0frsFIPuMY4zpX2b6w4rpxZN7Kybp9/3l0tX +hQIhAJVWVsGeJksLr4WNuRYf+9BbNPdoO/rRNCd2L+tT060ZAiEAl0uontITl9IS +s0yTcZm29lxG9pGkE+uVrOWQ1W0Ud10CIQDJ/L+VCQgjO+SviUECc/nMwhWDMT+V +cjLxGL8tcZjHKg== +-----END PRIVATE KEY-----`)}, + } + + for _, s := range []*v1.Secret{sCa, sClCert, sClKey} { + if secret, err := context.Clientset.CoreV1().Secrets(ns).Create(ctx, s, metav1.CreateOptions{}); err != nil { + t.Fatal(err) + } else { + defer func() { + err := context.Clientset.CoreV1().Secrets(ns).Delete(ctx, secret.Name, metav1.DeleteOptions{}) + if err != nil { + logger.Errorf("failed to delete secret %s: %v", secret.Name, err) + } + }() + } + } + + // Populate TLS config + newConfigWithTLS, removeCertFiles, err := configTLS(context, ns, secretConfig) + assert.NoError(t, err) + defer removeCertFiles() + + // Populate TLS config + for key, value := range newConfigWithTLS { + c[key] = string(value) + } + + // Configure TLS + err = utils.ConfigureTLS(config, c) + assert.NoError(t, err) +} diff --git a/pkg/daemon/ceph/osd/kms/vault_test.go b/pkg/daemon/ceph/osd/kms/vault_test.go index c7c8e1ffac40..adbe16148342 100644 --- a/pkg/daemon/ceph/osd/kms/vault_test.go +++ b/pkg/daemon/ceph/osd/kms/vault_test.go @@ -18,8 +18,12 @@ package kms import ( "context" + "io/ioutil" + "os" "testing" + "github.com/coreos/pkg/capnslog" + "github.com/pkg/errors" "github.com/rook/rook/pkg/clusterd" "github.com/rook/rook/pkg/operator/test" "github.com/stretchr/testify/assert" @@ -50,110 +54,226 @@ func Test_tlsSecretKeyToCheck(t *testing.T) { } func Test_configTLS(t *testing.T) { + // Set DEBUG logging + capnslog.SetGlobalLogLevel(capnslog.DEBUG) + os.Setenv("ROOK_LOG_LEVEL", "DEBUG") ctx := context.TODO() - config := 
map[string]string{ - "foo": "bar", - "KMS_PROVIDER": "vault", - "VAULT_ADDR": "1.1.1.1", - "VAULT_BACKEND_PATH": "vault", - } ns := "rook-ceph" context := &clusterd.Context{Clientset: test.New(t, 3)} - // No tls config - _, err := configTLS(context, ns, config) - assert.NoError(t, err) - - // TLS config with correct values - config = map[string]string{ - "foo": "bar", - "KMS_PROVIDER": "vault", - "VAULT_ADDR": "1.1.1.1", - "VAULT_BACKEND_PATH": "vault", - "VAULT_CACERT": "/etc/vault/cacert", - "VAULT_SKIP_VERIFY": "false", - } - config, err = configTLS(context, ns, config) - assert.NoError(t, err) - assert.Equal(t, "/etc/vault/cacert", config["VAULT_CACERT"]) - - // TLS config but no secret - config = map[string]string{ - "foo": "bar", - "KMS_PROVIDER": "vault", - "VAULT_ADDR": "1.1.1.1", - "VAULT_BACKEND_PATH": "vault", - "VAULT_CACERT": "vault-ca-cert", - "VAULT_SKIP_VERIFY": "false", - } - _, err = configTLS(context, ns, config) - assert.Error(t, err) - assert.EqualError(t, err, "failed to fetch tls k8s secret \"vault-ca-cert\": secrets \"vault-ca-cert\" not found") - - // TLS config success! - config = map[string]string{ - "foo": "bar", - "KMS_PROVIDER": "vault", - "VAULT_ADDR": "1.1.1.1", - "VAULT_BACKEND_PATH": "vault", - "VAULT_CACERT": "vault-ca-cert", - "VAULT_SKIP_VERIFY": "false", - } - s := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "vault-ca-cert", - Namespace: ns, - }, - Data: map[string][]byte{"cert": []byte("bar")}, - } - _, err = context.Clientset.CoreV1().Secrets(ns).Create(ctx, s, metav1.CreateOptions{}) - assert.NoError(t, err) - config, err = configTLS(context, ns, config) - assert.NoError(t, err) - assert.NotEqual(t, "vault-ca-cert", config["VAULT_CACERT"]) - err = context.Clientset.CoreV1().Secrets(ns).Delete(ctx, s.Name, metav1.DeleteOptions{}) - assert.NoError(t, err) - - // All TLS success! 
- config = map[string]string{ - "foo": "bar", - "KMS_PROVIDER": "vault", - "VAULT_ADDR": "1.1.1.1", - "VAULT_BACKEND_PATH": "vault", - "VAULT_CACERT": "vault-ca-cert", - "VAULT_CLIENT_CERT": "vault-client-cert", - "VAULT_CLIENT_KEY": "vault-client-key", - } - sCa := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "vault-ca-cert", - Namespace: ns, - }, - Data: map[string][]byte{"cert": []byte("bar")}, - } - sClCert := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "vault-client-cert", - Namespace: ns, - }, - Data: map[string][]byte{"cert": []byte("bar")}, - } - sClKey := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "vault-client-key", - Namespace: ns, - }, - Data: map[string][]byte{"key": []byte("bar")}, - } - _, err = context.Clientset.CoreV1().Secrets(ns).Create(ctx, sCa, metav1.CreateOptions{}) - assert.NoError(t, err) - _, err = context.Clientset.CoreV1().Secrets(ns).Create(ctx, sClCert, metav1.CreateOptions{}) - assert.NoError(t, err) - _, err = context.Clientset.CoreV1().Secrets(ns).Create(ctx, sClKey, metav1.CreateOptions{}) - assert.NoError(t, err) - config, err = configTLS(context, ns, config) - assert.NoError(t, err) - assert.NotEqual(t, "vault-ca-cert", config["VAULT_CACERT"]) - assert.NotEqual(t, "vault-client-cert", config["VAULT_CLIENT_CERT"]) - assert.NotEqual(t, "vault-client-key", config["VAULT_CLIENT_KEY"]) + t.Run("no TLS config", func(t *testing.T) { + config := map[string]string{ + "foo": "bar", + "KMS_PROVIDER": "vault", + "VAULT_ADDR": "1.1.1.1", + "VAULT_BACKEND_PATH": "vault", + } + // No tls config + _, removeCertFiles, err := configTLS(context, ns, config) + assert.NoError(t, err) + defer removeCertFiles() + }) + + t.Run("TLS config with already populated cert path", func(t *testing.T) { + config := map[string]string{ + "foo": "bar", + "KMS_PROVIDER": "vault", + "VAULT_ADDR": "1.1.1.1", + "VAULT_BACKEND_PATH": "vault", + "VAULT_CACERT": "/etc/vault/cacert", + "VAULT_SKIP_VERIFY": "false", + } + config, removeCertFiles, err := configTLS(context, ns, config) + assert.NoError(t, err) + assert.Equal(t, "/etc/vault/cacert", config["VAULT_CACERT"]) + defer removeCertFiles() + }) + + t.Run("TLS config but no secret", func(t *testing.T) { + config := map[string]string{ + "foo": "bar", + "KMS_PROVIDER": "vault", + "VAULT_ADDR": "1.1.1.1", + "VAULT_BACKEND_PATH": "vault", + "VAULT_CACERT": "vault-ca-cert", + "VAULT_SKIP_VERIFY": "false", + } + _, removeCertFiles, err := configTLS(context, ns, config) + assert.Error(t, err) + assert.EqualError(t, err, "failed to fetch tls k8s secret \"vault-ca-cert\": secrets \"vault-ca-cert\" not found") + assert.Nil(t, removeCertFiles) + }) + + t.Run("TLS config success!", func(t *testing.T) { + config := map[string]string{ + "foo": "bar", + "KMS_PROVIDER": "vault", + "VAULT_ADDR": "1.1.1.1", + "VAULT_BACKEND_PATH": "vault", + "VAULT_CACERT": "vault-ca-cert", + "VAULT_SKIP_VERIFY": "false", + } + s := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vault-ca-cert", + Namespace: ns, + }, + Data: map[string][]byte{"cert": []byte("bar")}, + } + _, err := context.Clientset.CoreV1().Secrets(ns).Create(ctx, s, metav1.CreateOptions{}) + assert.NoError(t, err) + config, removeCertFiles, err := configTLS(context, ns, config) + defer removeCertFiles() + assert.NoError(t, err) + assert.NotEqual(t, "vault-ca-cert", config["VAULT_CACERT"]) + err = context.Clientset.CoreV1().Secrets(ns).Delete(ctx, s.Name, metav1.DeleteOptions{}) + assert.NoError(t, err) + }) + + t.Run("advanced TLS config success!", func(t *testing.T) { + config 
:= map[string]string{ + "foo": "bar", + "KMS_PROVIDER": "vault", + "VAULT_ADDR": "1.1.1.1", + "VAULT_BACKEND_PATH": "vault", + "VAULT_CACERT": "vault-ca-cert", + "VAULT_CLIENT_CERT": "vault-client-cert", + "VAULT_CLIENT_KEY": "vault-client-key", + } + sCa := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vault-ca-cert", + Namespace: ns, + }, + Data: map[string][]byte{"cert": []byte("bar")}, + } + sClCert := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vault-client-cert", + Namespace: ns, + }, + Data: map[string][]byte{"cert": []byte("bar")}, + } + sClKey := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vault-client-key", + Namespace: ns, + }, + Data: map[string][]byte{"key": []byte("bar")}, + } + _, err := context.Clientset.CoreV1().Secrets(ns).Create(ctx, sCa, metav1.CreateOptions{}) + assert.NoError(t, err) + _, err = context.Clientset.CoreV1().Secrets(ns).Create(ctx, sClCert, metav1.CreateOptions{}) + assert.NoError(t, err) + _, err = context.Clientset.CoreV1().Secrets(ns).Create(ctx, sClKey, metav1.CreateOptions{}) + assert.NoError(t, err) + config, removeCertFiles, err := configTLS(context, ns, config) + assert.NoError(t, err) + assert.NotEqual(t, "vault-ca-cert", config["VAULT_CACERT"]) + assert.NotEqual(t, "vault-client-cert", config["VAULT_CLIENT_CERT"]) + assert.NotEqual(t, "vault-client-key", config["VAULT_CLIENT_KEY"]) + assert.FileExists(t, config["VAULT_CACERT"]) + assert.FileExists(t, config["VAULT_CLIENT_CERT"]) + assert.FileExists(t, config["VAULT_CLIENT_KEY"]) + removeCertFiles() + assert.NoFileExists(t, config["VAULT_CACERT"]) + assert.NoFileExists(t, config["VAULT_CLIENT_CERT"]) + assert.NoFileExists(t, config["VAULT_CLIENT_KEY"]) + }) + + t.Run("advanced TLS config success with timeout!", func(t *testing.T) { + config := map[string]string{ + "foo": "bar", + "KMS_PROVIDER": "vault", + "VAULT_ADDR": "1.1.1.1", + "VAULT_BACKEND_PATH": "vault", + "VAULT_CACERT": "vault-ca-cert", + "VAULT_CLIENT_CERT": "vault-client-cert", + "VAULT_CLIENT_KEY": "vault-client-key", + } + config, removeCertFiles, err := configTLS(context, ns, config) + assert.NoError(t, err) + assert.NotEqual(t, "vault-ca-cert", config["VAULT_CACERT"]) + assert.NotEqual(t, "vault-client-cert", config["VAULT_CLIENT_CERT"]) + assert.NotEqual(t, "vault-client-key", config["VAULT_CLIENT_KEY"]) + assert.FileExists(t, config["VAULT_CACERT"]) + assert.FileExists(t, config["VAULT_CLIENT_CERT"]) + assert.FileExists(t, config["VAULT_CLIENT_KEY"]) + removeCertFiles() + assert.NoFileExists(t, config["VAULT_CACERT"]) + assert.NoFileExists(t, config["VAULT_CLIENT_CERT"]) + assert.NoFileExists(t, config["VAULT_CLIENT_KEY"]) + }) + + // This test verifies that if any of ioutil.TempFile or ioutil.WriteFile fail during the TLS + // config loop we cleanup the already generated files. For instance, let's say we are at the + // second iteration, a file has been created, and then ioutil.TempFile fails, we must cleanup + // the previous file. Essentially we are verifying that defer does what it is supposed to do. + // Also, in this situation the cleanup function will be 'nil' and the caller won't run it so the + // configTLS() must do its own cleanup. 
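+ // The mocks below replace createTmpFile with a variant that creates a real temp file but reports an error, + // and getRemoveCertFiles with a closure that lets the test verify the partially-created file gets removed.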
+ t.Run("advanced TLS config with temp file creation error", func(t *testing.T) { + createTmpFile = func(dir string, pattern string) (f *os.File, err error) { + // Create a fake temp file + ff, err := ioutil.TempFile("", "") + if err != nil { + logger.Error(err) + return nil, err + } + + // Add the file to the list of files to remove + var fakeFilesToRemove []*os.File + fakeFilesToRemove = append(fakeFilesToRemove, ff) + getRemoveCertFiles = func(filesToRemove []*os.File) removeCertFilesFunction { + return func() { + filesToRemove = fakeFilesToRemove + for _, f := range filesToRemove { + t.Logf("removing file %q after failure from TempFile call", f.Name()) + f.Close() + os.Remove(f.Name()) + } + } + } + os.Setenv("ROOK_TMP_FILE", ff.Name()) + + return ff, errors.New("error creating tmp file") + } + config := map[string]string{ + "foo": "bar", + "KMS_PROVIDER": "vault", + "VAULT_ADDR": "1.1.1.1", + "VAULT_BACKEND_PATH": "vault", + "VAULT_CACERT": "vault-ca-cert", + "VAULT_CLIENT_CERT": "vault-client-cert", + "VAULT_CLIENT_KEY": "vault-client-key", + } + _, _, err := configTLS(context, ns, config) + assert.Error(t, err) + assert.EqualError(t, err, "failed to generate temp file for k8s secret \"vault-ca-cert\" content: error creating tmp file") + assert.NoFileExists(t, os.Getenv("ROOK_TMP_FILE")) + os.Unsetenv("ROOK_TMP_FILE") + }) +} + +func Test_buildKeyContext(t *testing.T) { + t.Run("no vault namespace, return empty map and assignment is possible", func(t *testing.T) { + config := map[string]string{ + "KMS_PROVIDER": "vault", + "VAULT_ADDR": "1.1.1.1", + } + context := buildKeyContext(config) + assert.Len(t, context, 0) + context["foo"] = "bar" + }) + + t.Run("vault namespace, return 1 single element in the map and assignment is possible", func(t *testing.T) { + config := map[string]string{ + "KMS_PROVIDER": "vault", + "VAULT_ADDR": "1.1.1.1", + "VAULT_NAMESPACE": "vault-namespace", + } + context := buildKeyContext(config) + assert.Len(t, context, 1) + context["foo"] = "bar" + assert.Len(t, context, 2) + }) } diff --git a/pkg/daemon/ceph/osd/remove.go b/pkg/daemon/ceph/osd/remove.go index 26c6a940bb23..12bb49c335b4 100644 --- a/pkg/daemon/ceph/osd/remove.go +++ b/pkg/daemon/ceph/osd/remove.go @@ -32,7 +32,7 @@ import ( ) // RemoveOSDs purges a list of OSDs from the cluster -func RemoveOSDs(context *clusterd.Context, clusterInfo *client.ClusterInfo, osdsToRemove []string) error { +func RemoveOSDs(context *clusterd.Context, clusterInfo *client.ClusterInfo, osdsToRemove []string, preservePVC bool) error { // Generate the ceph config for running ceph commands similar to the operator if err := client.WriteCephConfig(context, clusterInfo); err != nil { @@ -61,13 +61,13 @@ func RemoveOSDs(context *clusterd.Context, clusterInfo *client.ClusterInfo, osds continue } logger.Infof("osd.%d is marked 'DOWN'. 
Removing it", osdID) - removeOSD(context, clusterInfo, osdID) + removeOSD(context, clusterInfo, osdID, preservePVC) } return nil } -func removeOSD(clusterdContext *clusterd.Context, clusterInfo *client.ClusterInfo, osdID int) { +func removeOSD(clusterdContext *clusterd.Context, clusterInfo *client.ClusterInfo, osdID int, preservePVC bool) { ctx := context.TODO() // Get the host where the OSD is found hostName, err := client.GetCrushHostName(clusterdContext, clusterInfo, osdID) @@ -106,17 +106,32 @@ func removeOSD(clusterdContext *clusterd.Context, clusterInfo *client.ClusterInf logger.Infof("removing the osd prepare job %q", prepareJob.GetName()) if err := k8sutil.DeleteBatchJob(clusterdContext.Clientset, clusterInfo.Namespace, prepareJob.GetName(), false); err != nil { if err != nil { - // Continue deleting the OSD prepare job even if the deployment fails to be deleted + // Continue with the cleanup even if the job fails to be deleted logger.Errorf("failed to delete prepare job for osd %q. %v", prepareJob.GetName(), err) } } } - // Remove the OSD PVC - logger.Infof("removing the OSD PVC %q", pvcName) - if err := clusterdContext.Clientset.CoreV1().PersistentVolumeClaims(clusterInfo.Namespace).Delete(ctx, pvcName, metav1.DeleteOptions{}); err != nil { - if err != nil { - // Continue deleting the OSD PVC even if PVC deletion fails - logger.Errorf("failed to delete pvc for OSD %q. %v", pvcName, err) + if preservePVC { + // Detach the OSD PVC from Rook. We will continue OSD deletion even if failed to remove PVC label + logger.Infof("detach the OSD PVC %q from Rook", pvcName) + if pvc, err := clusterdContext.Clientset.CoreV1().PersistentVolumeClaims(clusterInfo.Namespace).Get(ctx, pvcName, metav1.GetOptions{}); err != nil { + logger.Errorf("failed to get pvc for OSD %q. %v", pvcName, err) + } else { + labels := pvc.GetLabels() + delete(labels, osd.CephDeviceSetPVCIDLabelKey) + pvc.SetLabels(labels) + if _, err := clusterdContext.Clientset.CoreV1().PersistentVolumeClaims(clusterInfo.Namespace).Update(ctx, pvc, metav1.UpdateOptions{}); err != nil { + logger.Errorf("failed to remove label %q from pvc for OSD %q. %v", osd.CephDeviceSetPVCIDLabelKey, pvcName, err) + } + } + } else { + // Remove the OSD PVC + logger.Infof("removing the OSD PVC %q", pvcName) + if err := clusterdContext.Clientset.CoreV1().PersistentVolumeClaims(clusterInfo.Namespace).Delete(ctx, pvcName, metav1.DeleteOptions{}); err != nil { + if err != nil { + // Continue deleting the OSD PVC even if PVC deletion fails + logger.Errorf("failed to delete pvc for OSD %q. 
%v", pvcName, err) + } } } } else { diff --git a/pkg/daemon/ceph/osd/volume.go b/pkg/daemon/ceph/osd/volume.go index 0dda7c3566b5..e6a01c1e30f4 100644 --- a/pkg/daemon/ceph/osd/volume.go +++ b/pkg/daemon/ceph/osd/volume.go @@ -808,7 +808,7 @@ func (a *OsdAgent) initializeDevicesLVMMode(context *clusterd.Context, devices * } for _, report := range cvReports { - if report.BlockDB != mdPath { + if report.BlockDB != mdPath && !strings.HasSuffix(mdPath, report.BlockDB) { return errors.Errorf("wrong db device for %s, required: %s, actual: %s", report.Data, mdPath, report.BlockDB) } } diff --git a/pkg/daemon/ceph/osd/volume_test.go b/pkg/daemon/ceph/osd/volume_test.go index 4e950671577b..bc5be24f451c 100644 --- a/pkg/daemon/ceph/osd/volume_test.go +++ b/pkg/daemon/ceph/osd/volume_test.go @@ -1333,15 +1333,14 @@ func TestIsNewStyledLvmBatch(t *testing.T) { } func TestInitializeBlockWithMD(t *testing.T) { - // Common vars for all the tests - devices := &DeviceOsdMapping{ - Entries: map[string]*DeviceOsdIDEntry{ - "sda": {Data: -1, Metadata: nil, Config: DesiredDevice{Name: "/dev/sda", MetadataDevice: "/dev/sdd"}}, - }, - } - // Test default behavior { + devices := &DeviceOsdMapping{ + Entries: map[string]*DeviceOsdIDEntry{ + "sda": {Data: -1, Metadata: nil, Config: DesiredDevice{Name: "/dev/sda", MetadataDevice: "/dev/sdd"}}, + }, + } + executor := &exectest.MockExecutor{} executor.MockExecuteCommand = func(command string, args ...string) error { logger.Infof("%s %v", command, args) @@ -1373,6 +1372,46 @@ func TestInitializeBlockWithMD(t *testing.T) { err := a.initializeDevicesLVMMode(context, devices) assert.NoError(t, err, "failed default behavior test") } + + // Test initialize with LV as metadata devices + { + devices := &DeviceOsdMapping{ + Entries: map[string]*DeviceOsdIDEntry{ + "sda": {Data: -1, Metadata: nil, Config: DesiredDevice{Name: "/dev/sda", MetadataDevice: "vg0/lv0"}}, + }, + } + executor := &exectest.MockExecutor{} + executor.MockExecuteCommand = func(command string, args ...string) error { + logger.Infof("%s %v", command, args) + + // Validate base common args + err := testBaseArgs(args) + if err != nil { + return err + } + + // Second command + if args[9] == "--osds-per-device" && args[10] == "1" && args[11] == "/dev/sda" && args[12] == "--db-devices" && args[13] == "/dev/vg0/lv0" { + return nil + } + + return errors.Errorf("unknown command %s %s", command, args) + } + executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { + // First command + if args[9] == "--osds-per-device" && args[10] == "1" && args[11] == "/dev/sda" && args[12] == "--db-devices" && args[13] == "/dev/vg0/lv0" && args[14] == "--report" { + return `[{"block_db": "vg0/lv0", "encryption": "None", "data": "/dev/sda", "data_size": "100.00 GB", "block_db_size": "10.00 GB"}]`, nil + } + + return "", errors.Errorf("unknown command %s %s", command, args) + } + a := &OsdAgent{clusterInfo: &cephclient.ClusterInfo{CephVersion: cephver.CephVersion{Major: 16, Minor: 2, Extra: 4}}, nodeName: "node1"} + context := &clusterd.Context{Executor: executor} + + err := a.initializeDevicesLVMMode(context, devices) + assert.NoError(t, err, "failed LV as metadataDevice test") + } + } func TestUseRawMode(t *testing.T) { diff --git a/pkg/daemon/discover/discover.go b/pkg/daemon/discover/discover.go index e735fa82452b..50d590eee506 100644 --- a/pkg/daemon/discover/discover.go +++ b/pkg/daemon/discover/discover.go @@ -158,6 +158,7 @@ func rawUdevBlockMonitor(c chan string, matches, exclusions 
[]string) { logger.Warningf("Cannot open udevadm stdout: %v", err) return } + defer stdout.Close() err = cmd.Start() if err != nil { diff --git a/pkg/operator/cassandra/constants/constants.go b/pkg/operator/cassandra/constants/constants.go deleted file mode 100644 index b9d6b7d67dd5..000000000000 --- a/pkg/operator/cassandra/constants/constants.go +++ /dev/null @@ -1,81 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package constants - -// These labels are only used on the ClusterIP services -// acting as each member's identity (static ip). -// Each of these labels is a record of intent to do -// something. The controller sets these labels and each -// member watches for them and takes the appropriate -// actions. -// -// See the sidecar design doc for more details. -const ( - // SeedLabel determines if a member is a seed or not. - SeedLabel = "cassandra.rook.io/seed" - - // DecommissionLabel expresses the intent to decommission - // the specific member. The presence of the label expresses - // the intent to decommission. If the value is true, it means - // the member has finished decommissioning. - // Values: {true, false} - DecommissionLabel = "cassandra.rook.io/decommissioned" - - // DeveloperModeAnnotation is present when the user wishes - // to bypass production-readiness checks and start the database - // either way. Currently useful for scylla, may get removed - // once configMapName field is implemented in Cluster CRD. - DeveloperModeAnnotation = "cassandra.rook.io/developer-mode" - - LabelValueTrue = "true" - LabelValueFalse = "false" -) - -// Generic Labels used on objects created by the operator. -const ( - ClusterNameLabel = "cassandra.rook.io/cluster" - DatacenterNameLabel = "cassandra.rook.io/datacenter" - RackNameLabel = "cassandra.rook.io/rack" - - AppName = "rook-cassandra" - OperatorAppName = "rook-cassandra-operator" -) - -// Environment Variable Names -const ( - PodIPEnvVar = "POD_IP" - - ResourceLimitCPUEnvVar = "CPU_LIMIT" - ResourceLimitMemoryEnvVar = "MEMORY_LIMIT" -) - -// Configuration Values -const ( - SharedDirName = "/mnt/shared" - PluginDirName = SharedDirName + "/" + "plugins" - - DataDirCassandra = "/var/lib/cassandra" - DataDirScylla = "/var/lib/scylla" - - JolokiaJarName = "jolokia.jar" - JolokiaPort = 8778 - JolokiaContext = "jolokia" - - ReadinessProbePath = "/readyz" - LivenessProbePath = "/healthz" - ProbePort = 8080 -) diff --git a/pkg/operator/cassandra/controller/cleanup.go b/pkg/operator/cassandra/controller/cleanup.go deleted file mode 100644 index e69495dbb12b..000000000000 --- a/pkg/operator/cassandra/controller/cleanup.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - "fmt" - - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - "github.com/rook/rook/pkg/operator/cassandra/controller/util" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// cleanup deletes all resources remaining because of cluster scale downs -func (cc *ClusterController) cleanup(c *cassandrav1alpha1.Cluster) error { - - for _, r := range c.Spec.Datacenter.Racks { - services, err := cc.serviceLister.Services(c.Namespace).List(util.RackSelector(r, c)) - if err != nil { - return fmt.Errorf("error listing member services: %s", err.Error()) - } - // Get rack status. If it doesn't exist, the rack isn't yet created. - stsName := util.StatefulSetNameForRack(r, c) - sts, err := cc.statefulSetLister.StatefulSets(c.Namespace).Get(stsName) - if apierrors.IsNotFound(err) { - continue - } - if err != nil { - return fmt.Errorf("error getting statefulset %s: %s", stsName, err.Error()) - } - memberCount := *sts.Spec.Replicas - memberServiceCount := int32(len(services)) - // If there are more services than members, some services need to be cleaned up - if memberServiceCount > memberCount { - maxIndex := memberCount - 1 - for _, svc := range services { - svcIndex, err := util.IndexFromName(svc.Name) - if err != nil { - logger.Errorf("Unexpected error while parsing index from name %s : %s", svc.Name, err.Error()) - continue - } - if svcIndex > maxIndex { - err := cc.cleanupMemberResources(svc.Name, r, c) - if err != nil { - return fmt.Errorf("error cleaning up member resources: %s", err.Error()) - } - } - } - } - } - logger.Infof("%s/%s - Successfully cleaned up cluster.", c.Namespace, c.Name) - return nil -} - -// cleanupMemberResources deletes all resources associated with a given member. -// Currently those are : -// - A PVC -// - A ClusterIP Service -func (cc *ClusterController) cleanupMemberResources(memberName string, r cassandrav1alpha1.RackSpec, c *cassandrav1alpha1.Cluster) error { - ctx := context.TODO() - logger.Infof("%s/%s - Cleaning up resources for member %s", c.Namespace, c.Name, memberName) - // Delete PVC - if len(r.Storage.VolumeClaimTemplates) > 0 { - // PVC naming convention for StatefulSets is - - pvcName := fmt.Sprintf("%s-%s", r.Storage.VolumeClaimTemplates[0].Name, memberName) - err := cc.kubeClient.CoreV1().PersistentVolumeClaims(c.Namespace).Delete(ctx, pvcName, metav1.DeleteOptions{}) - if err != nil && !apierrors.IsNotFound(err) { - return fmt.Errorf("error deleting pvc %s: %s", pvcName, err.Error()) - } - } - - // Delete Member Service - err := cc.kubeClient.CoreV1().Services(c.Namespace).Delete(ctx, memberName, metav1.DeleteOptions{}) - if err != nil { - return fmt.Errorf("error deleting member service %s: %s", memberName, err.Error()) - } - return nil -} diff --git a/pkg/operator/cassandra/controller/cluster.go b/pkg/operator/cassandra/controller/cluster.go deleted file mode 100644 index 9238a21fc30c..000000000000 --- a/pkg/operator/cassandra/controller/cluster.go +++ /dev/null @@ -1,267 +0,0 @@ -/* -Copyright 2018 The Rook Authors. 
All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - "fmt" - - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - "github.com/rook/rook/pkg/operator/cassandra/constants" - "github.com/rook/rook/pkg/operator/cassandra/controller/util" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// UpdateStatus updates the status of the given Cassandra Cluster. -// It doesn't post the result to the API Server yet. -// That will be done at the end of the sync loop. -func (cc *ClusterController) updateStatus(c *cassandrav1alpha1.Cluster) error { - clusterStatus := cassandrav1alpha1.ClusterStatus{ - Racks: map[string]*cassandrav1alpha1.RackStatus{}, - } - logger.Infof("Updating Status for cluster %s in namespace %s", c.Name, c.Namespace) - - for _, rack := range c.Spec.Datacenter.Racks { - - status := &cassandrav1alpha1.RackStatus{} - - // Get corresponding StatefulSet from lister - sts, err := cc.statefulSetLister.StatefulSets(c.Namespace). - Get(util.StatefulSetNameForRack(rack, c)) - // If it wasn't found, continue - if apierrors.IsNotFound(err) { - continue - } - // If we got a different error, requeue and log it - if err != nil { - return fmt.Errorf("error trying to get StatefulSet %s in namespace %s: %s", sts.Name, sts.Namespace, err.Error()) - } - - // Update Members - status.Members = *sts.Spec.Replicas - // Update ReadyMembers - status.ReadyMembers = sts.Status.ReadyReplicas - - // Update Scaling Down condition - services, err := util.GerMemberServicesForRack(rack, c, cc.serviceLister) - if err != nil { - return fmt.Errorf("error trying to get Pods for rack %s", rack.Name) - } - for _, svc := range services { - // Check if there is a decommission in progress - if _, ok := svc.Labels[constants.DecommissionLabel]; ok { - // Add MemberLeaving Condition to rack status - status.Conditions = append(status.Conditions, cassandrav1alpha1.RackCondition{ - Type: cassandrav1alpha1.RackConditionTypeMemberLeaving, - Status: cassandrav1alpha1.ConditionTrue, - }) - // Sanity check. Only the last member should be decommissioning. - index, err := util.IndexFromName(svc.Name) - if err != nil { - return err - } - if index != status.Members-1 { - return fmt.Errorf("only last member of each rack should be decommissioning, but %d-th member of %s found decommissioning while rack had %d members", index, rack.Name, status.Members) - } - } - } - - // Update Status for Rack - clusterStatus.Racks[rack.Name] = status - } - - c.Status = clusterStatus - return nil -} - -// SyncCluster checks the Status and performs reconciliation for -// the given Cassandra Cluster. 
-func (cc *ClusterController) syncCluster(c *cassandrav1alpha1.Cluster) error { - // Check if any rack isn't created - for _, rack := range c.Spec.Datacenter.Racks { - // For each rack, check if a status entry exists - if _, ok := c.Status.Racks[rack.Name]; !ok { - logger.Infof("Attempting to create Rack %s", rack.Name) - err := cc.createRack(rack, c) - return err - } - } - - // Check if there is a scale-down in progress - for _, rack := range c.Spec.Datacenter.Racks { - if util.IsRackConditionTrue(c.Status.Racks[rack.Name], cassandrav1alpha1.RackConditionTypeMemberLeaving) { - // Resume scale down - err := cc.scaleDownRack(rack, c) - return err - } - } - - // Check that all racks are ready before taking any action - for _, rack := range c.Spec.Datacenter.Racks { - rackStatus := c.Status.Racks[rack.Name] - if rackStatus.Members != rackStatus.ReadyMembers { - logger.Infof("Rack %s is not ready, %+v", rack.Name, *rackStatus) - return nil - } - } - - // Check if any rack needs to scale down - for _, rack := range c.Spec.Datacenter.Racks { - if rack.Members < c.Status.Racks[rack.Name].Members { - // scale down - err := cc.scaleDownRack(rack, c) - return err - } - } - - // Check if any rack needs to scale up - for _, rack := range c.Spec.Datacenter.Racks { - - if rack.Members > c.Status.Racks[rack.Name].Members { - logger.Infof("Attempting to scale rack %s", rack.Name) - err := cc.scaleUpRack(rack, c) - return err - } - } - - return nil -} - -// createRack creates a new Cassandra Rack with 0 Members. -func (cc *ClusterController) createRack(r cassandrav1alpha1.RackSpec, c *cassandrav1alpha1.Cluster) error { - ctx := context.TODO() - sts := util.StatefulSetForRack(r, c, cc.rookImage) - c.Spec.Annotations.Merge(r.Annotations).ApplyToObjectMeta(&sts.Spec.Template.ObjectMeta) - c.Spec.Annotations.Merge(r.Annotations).ApplyToObjectMeta(&sts.ObjectMeta) - existingStatefulset, err := cc.statefulSetLister.StatefulSets(sts.Namespace).Get(sts.Name) - if err == nil { - return util.VerifyOwner(existingStatefulset, c) - } - if err != nil && !apierrors.IsNotFound(err) { - return fmt.Errorf("Error trying to create StatefulSet %s in namespace %s : %s", sts.Name, sts.Namespace, err.Error()) - } - - _, err = cc.kubeClient.AppsV1().StatefulSets(sts.Namespace).Create(ctx, sts, metav1.CreateOptions{}) - - if err == nil { - cc.recorder.Event( - c, - corev1.EventTypeNormal, - SuccessSynced, - fmt.Sprintf(MessageRackCreated, r.Name), - ) - } - - if err != nil { - logger.Errorf("Unexpected error while creating rack for cluster %+v: %s", c, err.Error()) - } - - return err -} - -// scaleUpRack handles scaling up for an existing Cassandra Rack. -// Calling this action implies all members of the Rack are Ready. -func (cc *ClusterController) scaleUpRack(r cassandrav1alpha1.RackSpec, c *cassandrav1alpha1.Cluster) error { - sts, err := cc.statefulSetLister.StatefulSets(c.Namespace).Get(util.StatefulSetNameForRack(r, c)) - if err != nil { - return fmt.Errorf("error trying to scale rack %s in namespace %s, underlying StatefulSet not found", r.Name, c.Namespace) - } - - logger.Infof("Attempting to scale up Rack %s", r.Name) - - err = util.ScaleStatefulSet(sts, 1, cc.kubeClient) - - if err == nil { - cc.recorder.Event( - c, - corev1.EventTypeNormal, - SuccessSynced, - fmt.Sprintf(MessageRackScaledUp, r.Name, *sts.Spec.Replicas+1), - ) - } - - return err - -} - -// scaleDownRack handles scaling down for an existing Cassandra Rack. -// Calling this action implies all members of the Rack are Ready. 
-func (cc *ClusterController) scaleDownRack(r cassandrav1alpha1.RackSpec, c *cassandrav1alpha1.Cluster) error { - logger.Infof("Scaling down rack %s", r.Name) - - // Get the current actual number of Members - members := c.Status.Racks[r.Name].Members - - // Find the member to decommission - memberName := fmt.Sprintf("%s-%d", util.StatefulSetNameForRack(r, c), members-1) - logger.Infof("Member of interest: %s", memberName) - memberService, err := cc.serviceLister.Services(c.Namespace).Get(memberName) - if err != nil { - return fmt.Errorf("error trying to get Member Service %s: %s", memberName, err.Error()) - } - - // Check if there was a scale down in progress that has completed. - if memberService.Labels[constants.DecommissionLabel] == constants.LabelValueTrue { - - logger.Infof("Found decommissioned member: %s", memberName) - - // Get rack's statefulset - stsName := util.StatefulSetNameForRack(r, c) - sts, err := cc.statefulSetLister.StatefulSets(c.Namespace).Get(stsName) - if err != nil { - return fmt.Errorf("error trying to get StatefulSet %s", stsName) - } - // Scale the statefulset - err = util.ScaleStatefulSet(sts, -1, cc.kubeClient) - if err != nil { - return fmt.Errorf("error trying to scale down StatefulSet %s", stsName) - } - // Cleanup is done on each sync loop, no need to do anything else here - - cc.recorder.Event( - c, - corev1.EventTypeNormal, - SuccessSynced, - fmt.Sprintf(MessageRackScaledDown, r.Name, members-1), - ) - return nil - } - - logger.Infof("Checking for scale down. Desired: %d. Actual: %d", r.Members, c.Status.Racks[r.Name].Members) - // Then, check if there is a requested scale down. - if r.Members < c.Status.Racks[r.Name].Members { - - logger.Infof("Scale down requested, member %s will decommission", memberName) - // Record the intent to decommission the member - old := memberService.DeepCopy() - memberService.Labels[constants.DecommissionLabel] = constants.LabelValueFalse - if err := util.PatchService(old, memberService, cc.kubeClient); err != nil { - return fmt.Errorf("error patching member service %s: %s", memberName, err.Error()) - } - - cc.recorder.Event( - c, - corev1.EventTypeNormal, - SuccessSynced, - fmt.Sprintf(MessageRackScaleDownInProgress, r.Name, members-1), - ) - } - - return nil -} diff --git a/pkg/operator/cassandra/controller/cluster_test.go b/pkg/operator/cassandra/controller/cluster_test.go deleted file mode 100644 index d70c84428c43..000000000000 --- a/pkg/operator/cassandra/controller/cluster_test.go +++ /dev/null @@ -1,251 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controller - -import ( - "context" - "fmt" - "testing" - - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - "github.com/rook/rook/pkg/operator/cassandra/constants" - "github.com/rook/rook/pkg/operator/cassandra/controller/util" - casstest "github.com/rook/rook/pkg/operator/cassandra/test" - "github.com/stretchr/testify/require" - appsv1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -func TestCreateRack(t *testing.T) { - ctx := context.TODO() - simpleCluster := casstest.NewSimpleCluster(3) - - tests := []struct { - name string - kubeObjects []runtime.Object - rack cassandrav1alpha1.RackSpec - cluster *cassandrav1alpha1.Cluster - expectedErr bool - }{ - { - name: "new rack", - kubeObjects: nil, - rack: simpleCluster.Spec.Datacenter.Racks[0], - cluster: simpleCluster, - expectedErr: false, - }, - { - name: "sts already exists", - kubeObjects: []runtime.Object{ - util.StatefulSetForRack(simpleCluster.Spec.Datacenter.Racks[0], simpleCluster, ""), - }, - rack: simpleCluster.Spec.Datacenter.Racks[0], - cluster: simpleCluster, - expectedErr: false, - }, - { - name: "sts exists with different owner", - kubeObjects: []runtime.Object{ - &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: util.StatefulSetNameForRack(simpleCluster.Spec.Datacenter.Racks[0], simpleCluster), - Namespace: simpleCluster.Namespace, - OwnerReferences: nil, - }, - Spec: appsv1.StatefulSetSpec{}, - }, - }, - rack: simpleCluster.Spec.Datacenter.Racks[0], - cluster: simpleCluster, - expectedErr: true, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - cc := newFakeClusterController(t, test.kubeObjects, nil) - - if err := cc.createRack(test.rack, test.cluster); err == nil { - if test.expectedErr { - t.Errorf("Expected an error, got none.") - } else { - - var sts *appsv1.StatefulSet - sts, err = cc.kubeClient.AppsV1().StatefulSets(test.cluster.Namespace). 
- Get(ctx, util.StatefulSetNameForRack(test.rack, test.cluster), metav1.GetOptions{}) - if err != nil { - t.Errorf("Couldn't retrieve expected StatefulSet: %s", err.Error()) - } else { - t.Logf("Got StatefulSet as expected: %s", sts.Name) - } - } - } else { - if test.expectedErr { - t.Logf("Got an error as expected: %s", err.Error()) - } else { - t.Errorf("Unexpected error: %s", err.Error()) - } - } - }) - } -} - -func TestScaleUpRack(t *testing.T) { - ctx := context.TODO() - currMembers := int32(2) - expMembers := int32(3) - c := casstest.NewSimpleCluster(expMembers) - r := c.Spec.Datacenter.Racks[0] - sts := util.StatefulSetForRack(r, c, "") - *sts.Spec.Replicas = currMembers - - tests := []struct { - name string - kubeObjects []runtime.Object - rack cassandrav1alpha1.RackSpec - rackStatus *cassandrav1alpha1.RackStatus - cluster *cassandrav1alpha1.Cluster - expectedErr bool - }{ - { - name: "normal", - kubeObjects: []runtime.Object{sts}, - rack: r, - rackStatus: &cassandrav1alpha1.RackStatus{Members: currMembers, ReadyMembers: currMembers}, - cluster: c, - expectedErr: false, - }, - { - name: "statefulset missing", - kubeObjects: []runtime.Object{}, - rack: r, - rackStatus: &cassandrav1alpha1.RackStatus{Members: currMembers, ReadyMembers: currMembers}, - cluster: c, - expectedErr: true, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - - cc := newFakeClusterController(t, test.kubeObjects, nil) - - test.cluster.Status = cassandrav1alpha1.ClusterStatus{ - Racks: map[string]*cassandrav1alpha1.RackStatus{ - "test-rack": test.rackStatus, - }, - } - err := cc.scaleUpRack(test.rack, test.cluster) - - if err == nil { - if test.expectedErr { - t.Errorf("Expected an error, got none.") - } else { - sts, err := cc.kubeClient.AppsV1().StatefulSets(test.cluster.Namespace). 
- Get(ctx, util.StatefulSetNameForRack(test.rack, test.cluster), metav1.GetOptions{}) - if err != nil { - t.Errorf("Couldn't retrieve expected StatefulSet: %s", err.Error()) - return - } - expectedReplicas := test.rackStatus.Members + 1 - actualReplicas := *sts.Spec.Replicas - if actualReplicas != expectedReplicas { - t.Errorf("Error, expected %d replicas, got %d.", expectedReplicas, actualReplicas) - return - } - t.Logf("Rack scaled to %d members as expected", actualReplicas) - } - } else { - if test.expectedErr { - t.Logf("Got an error as expected: %s", err.Error()) - } - } - - }) - } -} - -func TestScaleDownRack(t *testing.T) { - ctx := context.TODO() - desired := int32(2) - actual := int32(3) - - c := casstest.NewSimpleCluster(desired) - r := c.Spec.Datacenter.Racks[0] - c.Status = cassandrav1alpha1.ClusterStatus{ - Racks: map[string]*cassandrav1alpha1.RackStatus{ - r.Name: { - Members: actual, - ReadyMembers: actual, - }, - }, - } - sts := util.StatefulSetForRack(r, c, "") - memberServices := casstest.MemberServicesForCluster(c) - - // Find the member to decommission - memberName := fmt.Sprintf("%s-%d", util.StatefulSetNameForRack(r, c), actual-1) - - t.Run("scale down requested and started", func(t *testing.T) { - - kubeObjects := append(memberServices, sts) - rookObjects := []runtime.Object{c} - cc := newFakeClusterController(t, kubeObjects, rookObjects) - - err := cc.scaleDownRack(r, c) - require.NoErrorf(t, err, "Unexpected error while scaling down: %v", err) - - // Check that MemberService has the decommissioned label - svc, err := cc.serviceLister.Services(c.Namespace).Get(memberName) - require.NoErrorf(t, err, "Unexpected error while getting MemberService: %v", err) - - val, ok := svc.Labels[constants.DecommissionLabel] - require.True(t, ok, "Service didn't have the decommissioned label as expected") - require.Truef(t, val == constants.LabelValueFalse, "Decommissioned Label had unexpected value: %s", val) - - }) - - t.Run("scale down resumed", func(t *testing.T) { - - sts.Spec.Replicas = &actual - - kubeObjects := append(memberServices, sts) - rookObjects := []runtime.Object{c} - - cc := newFakeClusterController(t, kubeObjects, rookObjects) - - svc, err := cc.serviceLister.Services(c.Namespace).Get(memberName) - require.NoErrorf(t, err, "Unexpected error while getting MemberService: %v", err) - - // Mark as decommissioned - svc.Labels[constants.DecommissionLabel] = constants.LabelValueTrue - _, err = cc.kubeClient.CoreV1().Services(svc.Namespace).Update(ctx, svc, metav1.UpdateOptions{}) - require.Nilf(t, err, "Unexpected error while updating MemberService: %v", err) - - // Resume decommission - err = cc.scaleDownRack(r, c) - require.NoErrorf(t, err, "Unexpected error while resuming scale down: %v", err) - - // Check that StatefulSet is scaled - updatedSts, err := cc.kubeClient.AppsV1().StatefulSets(sts.Namespace).Get(ctx, sts.Name, metav1.GetOptions{}) - require.NoErrorf(t, err, "Unexpected error while getting statefulset: %v", err) - require.Truef(t, *updatedSts.Spec.Replicas == *sts.Spec.Replicas-1, "Statefulset has incorrect number of replicas. Expected: %d, got %d.", *sts.Spec.Replicas-1, *updatedSts.Spec.Replicas) - - }) - -} diff --git a/pkg/operator/cassandra/controller/controller.go b/pkg/operator/cassandra/controller/controller.go deleted file mode 100644 index 3fcd6619bdf8..000000000000 --- a/pkg/operator/cassandra/controller/controller.go +++ /dev/null @@ -1,369 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "fmt" - "reflect" - "time" - - "github.com/coreos/pkg/capnslog" - "github.com/davecgh/go-spew/spew" - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - rookClientset "github.com/rook/rook/pkg/client/clientset/versioned" - rookScheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - informersv1alpha1 "github.com/rook/rook/pkg/client/informers/externalversions/cassandra.rook.io/v1alpha1" - listersv1alpha1 "github.com/rook/rook/pkg/client/listers/cassandra.rook.io/v1alpha1" - "github.com/rook/rook/pkg/operator/cassandra/controller/util" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - appsinformers "k8s.io/client-go/informers/apps/v1" - coreinformers "k8s.io/client-go/informers/core/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" - typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" - appslisters "k8s.io/client-go/listers/apps/v1" - corelisters "k8s.io/client-go/listers/core/v1" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/workqueue" -) - -const ( - controllerName = "cassandra-controller" - clusterQueueName = "cluster-queue" -) - -var logger = capnslog.NewPackageLogger("github.com/rook/rook", "cassandra-controller") - -// ClusterController encapsulates all the tools the controller needs -// in order to talk to the Kubernetes API -type ClusterController struct { - rookImage string - kubeClient kubernetes.Interface - rookClient rookClientset.Interface - clusterLister listersv1alpha1.ClusterLister - clusterListerSynced cache.InformerSynced - statefulSetLister appslisters.StatefulSetLister - statefulSetListerSynced cache.InformerSynced - serviceLister corelisters.ServiceLister - serviceListerSynced cache.InformerSynced - podLister corelisters.PodLister - podListerSynced cache.InformerSynced - - // queue is a rate limited work queue. This is used to queue work to be - // processed instead of performing it as soon as a change happens. This - // means we can ensure we only process a fixed amount of resources at a - // time, and makes it easy to ensure we are never processing the same item - // simultaneously in two different workers. 
- queue workqueue.RateLimitingInterface - // recorder is an event recorder for recording Event resources to the Kubernetes API - recorder record.EventRecorder -} - -// New returns a new ClusterController -func New( - rookImage string, - kubeClient kubernetes.Interface, - rookClient rookClientset.Interface, - clusterInformer informersv1alpha1.ClusterInformer, - statefulSetInformer appsinformers.StatefulSetInformer, - serviceInformer coreinformers.ServiceInformer, - podInformer coreinformers.PodInformer, -) *ClusterController { - - // Add sample-controller types to the default Kubernetes Scheme so Events can be - // logged for sample-controller types. - if err := rookScheme.AddToScheme(scheme.Scheme); err != nil { - logger.Errorf("failed to add to the default kubernetes scheme. %v", err) - } - // Create event broadcaster - logger.Infof("creating event broadcaster...") - eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(logger.Infof) - eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}) - recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName}) - - cc := &ClusterController{ - rookImage: rookImage, - kubeClient: kubeClient, - rookClient: rookClient, - - clusterLister: clusterInformer.Lister(), - clusterListerSynced: clusterInformer.Informer().HasSynced, - statefulSetLister: statefulSetInformer.Lister(), - statefulSetListerSynced: statefulSetInformer.Informer().HasSynced, - podLister: podInformer.Lister(), - podListerSynced: podInformer.Informer().HasSynced, - serviceLister: serviceInformer.Lister(), - serviceListerSynced: serviceInformer.Informer().HasSynced, - - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), clusterQueueName), - recorder: recorder, - } - - // Add event handling functions - - clusterInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - newCluster, ok := obj.(*cassandrav1alpha1.Cluster) - if !ok { - return - } - cc.enqueueCluster(newCluster) - }, - UpdateFunc: func(old, new interface{}) { - newCluster, ok := new.(*cassandrav1alpha1.Cluster) - if !ok { - return - } - oldCluster, ok := old.(*cassandrav1alpha1.Cluster) - if !ok { - return - } - // If the Spec is the same as the one in our cache, there aren't - // any changes we are interested in. - if reflect.DeepEqual(newCluster.Spec, oldCluster.Spec) { - return - } - cc.enqueueCluster(newCluster) - }, - //Deletion handling: - // Atm, the only thing left behind will be the state, ie - // the PVCs that the StatefulSets don't erase. - // This behaviour may actually be preferable to deleting them, - // since it ensures that no data will be lost if someone accidentally - // deletes the cluster. - }) - - statefulSetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: cc.handleObject, - UpdateFunc: func(old, new interface{}) { - newStatefulSet, ok := new.(*appsv1.StatefulSet) - if !ok { - return - } - oldStatefulSet, ok := old.(*appsv1.StatefulSet) - if !ok { - return - } - // If the StatefulSet is the same as the one in our cache, there - // is no use adding it again. - if newStatefulSet.ResourceVersion == oldStatefulSet.ResourceVersion { - return - } - // If ObservedGeneration != Generation, it means that the StatefulSet controller - // has not yet processed the current StatefulSet object. - // That means its Status is stale and we don't want to queue it. 
- if newStatefulSet.Status.ObservedGeneration != newStatefulSet.Generation { - return - } - cc.handleObject(new) - }, - DeleteFunc: cc.handleObject, - }) - - serviceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - service, ok := obj.(*corev1.Service) - if !ok { - return - } - if service.Spec.ClusterIP == corev1.ClusterIPNone { - return - } - cc.handleObject(obj) - }, - UpdateFunc: func(old, new interface{}) { - newService, ok := new.(*corev1.Service) - if !ok { - return - } - oldService, ok := old.(*corev1.Service) - if !ok { - return - } - if oldService.ResourceVersion == newService.ResourceVersion { - return - } - cc.handleObject(new) - }, - DeleteFunc: func(obj interface{}) { - // TODO: investigate if further action needs to be taken - }, - }) - - return cc -} - -// Run starts the ClusterController process loop -func (cc *ClusterController) Run(threadiness int, stopCh <-chan struct{}) error { - defer runtime.HandleCrash() - defer cc.queue.ShutDown() - - // Start the informer factories to begin populating the informer caches - logger.Info("starting cassandra controller") - - // Wait for the caches to be synced before starting workers - logger.Info("waiting for informers caches to sync...") - if ok := cache.WaitForCacheSync( - stopCh, - cc.clusterListerSynced, - cc.statefulSetListerSynced, - cc.podListerSynced, - cc.serviceListerSynced, - ); !ok { - return fmt.Errorf("failed to wait for caches to sync") - } - - logger.Info("starting workers") - for i := 0; i < threadiness; i++ { - go wait.Until(cc.runWorker, time.Second, stopCh) - } - - logger.Info("started workers") - <-stopCh - logger.Info("Shutting down cassandra controller workers") - - return nil -} - -func (cc *ClusterController) runWorker() { - for cc.processNextWorkItem() { - } -} - -func (cc *ClusterController) processNextWorkItem() bool { - obj, shutdown := cc.queue.Get() - - if shutdown { - return false - } - - err := func(obj interface{}) error { - defer cc.queue.Done(obj) - key, ok := obj.(string) - if !ok { - cc.queue.Forget(obj) - runtime.HandleError(fmt.Errorf("expected string in queue but got %#v", obj)) - } - if err := cc.syncHandler(key); err != nil { - cc.queue.AddRateLimited(key) - return fmt.Errorf("error syncing '%s', requeueing: %s", key, err.Error()) - } - cc.queue.Forget(obj) - logger.Infof("Successfully synced '%s'", key) - return nil - }(obj) - - if err != nil { - runtime.HandleError(err) - return true - } - - return true -} - -// syncHandler compares the actual state with the desired, and attempts to -// converge the two. It then updates the Status block of the Cluster -// resource with the current status of the resource. -func (cc *ClusterController) syncHandler(key string) error { - - // Convert the namespace/name string into a distinct namespace and name. - namespace, name, err := cache.SplitMetaNamespaceKey(key) - if err != nil { - runtime.HandleError(fmt.Errorf("invalid resource key: %s", key)) - return nil - } - - // Get the Cluster resource with this namespace/name - cluster, err := cc.clusterLister.Clusters(namespace).Get(name) - if err != nil { - // The Cluster resource may no longer exist, in which case we stop processing. 
- if apierrors.IsNotFound(err) { - runtime.HandleError(fmt.Errorf("cluster '%s' in work queue no longer exists", key)) - return nil - } - return fmt.Errorf("Unexpected error while getting cluster object: %s", err) - } - - logger.Infof("handling cluster object: %+v", spew.Sdump(cluster)) - // Deepcopy here to ensure nobody messes with the cache. - old, new := cluster, cluster.DeepCopy() - // If sync was successful and Status has changed, update the Cluster. - if err = cc.Sync(new); err == nil && !reflect.DeepEqual(old.Status, new.Status) { - err = util.PatchClusterStatus(new, cc.rookClient) - } - - return err -} - -// enqueueCluster takes a Cluster resource and converts it into a namespace/name -// string which is then put onto the work queue. This method should not be -// passed resources of any type other than Cluster. -func (cc *ClusterController) enqueueCluster(obj *cassandrav1alpha1.Cluster) { - var key string - var err error - if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil { - runtime.HandleError(err) - return - } - cc.queue.AddRateLimited(key) -} - -// handleObject will take any resource implementing metav1.Object and attempt -// to find the Cluster resource that 'owns' it. It does this by looking at the -// objects metadata.ownerReferences field for an appropriate OwnerReference. -// It then enqueues that Cluster resource to be processed. If the object does not -// have an appropriate OwnerReference, it will simply be skipped. -func (cc *ClusterController) handleObject(obj interface{}) { - var object metav1.Object - var ok bool - if object, ok = obj.(metav1.Object); !ok { - tombstone, ok := obj.(cache.DeletedFinalStateUnknown) - if !ok { - runtime.HandleError(fmt.Errorf("error decoding object, invalid type")) - return - } - object, ok = tombstone.Obj.(metav1.Object) - if !ok { - runtime.HandleError(fmt.Errorf("error decoding object tombstone, invalid type")) - return - } - logger.Infof("Recovered deleted object '%s' from tombstone", object.GetName()) - } - logger.Infof("Processing object: %s", object.GetName()) - if ownerRef := metav1.GetControllerOf(object); ownerRef != nil { - // If the object is not a Cluster or doesn't belong to our APIVersion, skip it. - if ownerRef.Kind != "Cluster" || ownerRef.APIVersion != cassandrav1alpha1.APIVersion { - return - } - - cluster, err := cc.clusterLister.Clusters(object.GetNamespace()).Get(ownerRef.Name) - if err != nil { - logger.Infof("ignoring orphaned object '%s' of cluster '%s'", object.GetSelfLink(), ownerRef.Name) - return - } - - cc.enqueueCluster(cluster) - return - } -} diff --git a/pkg/operator/cassandra/controller/controller_test.go b/pkg/operator/cassandra/controller/controller_test.go deleted file mode 100644 index ac0cf877fff4..000000000000 --- a/pkg/operator/cassandra/controller/controller_test.go +++ /dev/null @@ -1,91 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controller - -import ( - "testing" - "time" - - rookfake "github.com/rook/rook/pkg/client/clientset/versioned/fake" - rookScheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" - rookinformers "github.com/rook/rook/pkg/client/informers/externalversions" - "github.com/stretchr/testify/assert" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - kubeinformers "k8s.io/client-go/informers" - kubefake "k8s.io/client-go/kubernetes/fake" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/workqueue" -) - -const informerResyncPeriod = time.Millisecond - -// newFakeClusterController returns a ClusterController with fake clientsets -// and informers. -// The kubeObjects and rookObjects given as input are injected into the informers' cache. -func newFakeClusterController(t *testing.T, kubeObjects []runtime.Object, rookObjects []runtime.Object) *ClusterController { - - // Add sample-controller types to the default Kubernetes Scheme so Events can be - // logged for sample-controller types. - err := rookScheme.AddToScheme(scheme.Scheme) - if err != nil { - assert.NoError(t, err) - } - - kubeClient := kubefake.NewSimpleClientset(kubeObjects...) - rookClient := rookfake.NewSimpleClientset(rookObjects...) - - kubeSharedInformerFactory := kubeinformers.NewSharedInformerFactory(kubeClient, informerResyncPeriod) - rookSharedInformerFactory := rookinformers.NewSharedInformerFactory(rookClient, informerResyncPeriod) - stopCh := make(chan struct{}) - - eventBroadcaster := record.NewBroadcaster() - recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName}) - - cc := &ClusterController{ - rookImage: "", - kubeClient: kubeClient, - rookClient: rookClient, - - clusterLister: rookSharedInformerFactory.Cassandra().V1alpha1().Clusters().Lister(), - clusterListerSynced: rookSharedInformerFactory.Cassandra().V1alpha1().Clusters().Informer().HasSynced, - statefulSetLister: kubeSharedInformerFactory.Apps().V1().StatefulSets().Lister(), - statefulSetListerSynced: kubeSharedInformerFactory.Apps().V1().StatefulSets().Informer().HasSynced, - podLister: kubeSharedInformerFactory.Core().V1().Pods().Lister(), - podListerSynced: kubeSharedInformerFactory.Core().V1().Pods().Informer().HasSynced, - serviceLister: kubeSharedInformerFactory.Core().V1().Services().Lister(), - serviceListerSynced: kubeSharedInformerFactory.Core().V1().Services().Informer().HasSynced, - - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), clusterQueueName), - recorder: recorder, - } - - kubeSharedInformerFactory.Start(stopCh) - rookSharedInformerFactory.Start(stopCh) - - cache.WaitForCacheSync( - stopCh, - cc.clusterListerSynced, - cc.statefulSetListerSynced, - cc.serviceListerSynced, - cc.podListerSynced, - ) - - return cc -} diff --git a/pkg/operator/cassandra/controller/service.go b/pkg/operator/cassandra/controller/service.go deleted file mode 100644 index 11b3eb26820b..000000000000 --- a/pkg/operator/cassandra/controller/service.go +++ /dev/null @@ -1,159 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - "strings" - - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - "github.com/rook/rook/pkg/operator/cassandra/constants" - "github.com/rook/rook/pkg/operator/cassandra/controller/util" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// SyncClusterHeadlessService checks if a Headless Service exists -// for the given Cluster, in order for the StatefulSets to utilize it. -// If it doesn't exists, then create it. -func (cc *ClusterController) syncClusterHeadlessService(c *cassandrav1alpha1.Cluster) error { - clusterHeadlessService := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: util.HeadlessServiceNameForCluster(c), - Namespace: c.Namespace, - Labels: util.ClusterLabels(c), - OwnerReferences: []metav1.OwnerReference{util.NewControllerRef(c)}, - }, - Spec: corev1.ServiceSpec{ - ClusterIP: corev1.ClusterIPNone, - Type: corev1.ServiceTypeClusterIP, - Selector: util.ClusterLabels(c), - // Necessary to specify a Port to work correctly - // https://github.com/kubernetes/kubernetes/issues/32796 - // TODO: find in what version this was fixed - Ports: []corev1.ServicePort{ - { - Name: "prometheus", - Port: 9180, - }, - }, - }, - } - - logger.Infof("Syncing ClusterHeadlessService `%s` for Cluster `%s`", clusterHeadlessService.Name, c.Name) - - return cc.syncService(clusterHeadlessService, c) -} - -// SyncMemberServices checks, for every Pod of the Cluster that -// has been created, if a corresponding ClusterIP Service exists, -// which will serve as a static ip. -// If it doesn't exist, it creates it. -// It also assigns the first two members of each rack as seeds. -func (cc *ClusterController) syncMemberServices(c *cassandrav1alpha1.Cluster) error { - - pods, err := util.GetPodsForCluster(c, cc.podLister) - if err != nil { - return err - } - - // For every Pod of the cluster that exists, check that a - // a corresponding ClusterIP Service exists, and if it doesn't, - // create it. - logger.Infof("Syncing MemberServices for Cluster `%s`", c.Name) - for _, pod := range pods { - if err := cc.syncService(memberServiceForPod(pod, c), c); err != nil { - logger.Errorf("Error syncing member service for '%s'", pod.Name) - return err - } - } - return nil -} - -// syncService checks if the given Service exists and creates it if it doesn't -// it creates it -func (cc *ClusterController) syncService(s *corev1.Service, c *cassandrav1alpha1.Cluster) error { - ctx := context.TODO() - existingService, err := cc.serviceLister.Services(s.Namespace).Get(s.Name) - // If we get an error but without the IsNotFound error raised - // then something is wrong with the network, so requeue. 
- if err != nil && !apierrors.IsNotFound(err) { - return err - } - // If the service already exists, check that it's - // controlled by the given Cluster - if err == nil { - return util.VerifyOwner(existingService, c) - } - - // At this point, the Service doesn't exist, so we are free to create it - _, err = cc.kubeClient.CoreV1().Services(s.Namespace).Create(ctx, s, metav1.CreateOptions{}) - return err - -} - -func memberServiceForPod(pod *corev1.Pod, cluster *cassandrav1alpha1.Cluster) *corev1.Service { - - labels := util.ClusterLabels(cluster) - labels[constants.DatacenterNameLabel] = pod.Labels[constants.DatacenterNameLabel] - labels[constants.RackNameLabel] = pod.Labels[constants.RackNameLabel] - // If Member is seed, add the appropriate label - if strings.HasSuffix(pod.Name, "-0") || strings.HasSuffix(pod.Name, "-1") { - labels[constants.SeedLabel] = "" - } - - return &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: pod.Name, - Namespace: pod.Namespace, - OwnerReferences: []metav1.OwnerReference{util.NewControllerRef(cluster)}, - Labels: labels, - }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeClusterIP, - Selector: util.StatefulSetPodLabel(pod.Name), - Ports: []corev1.ServicePort{ - { - Name: "inter-node-communication", - Port: 7000, - }, - { - Name: "ssl-inter-node-communication", - Port: 7001, - }, - { - Name: "jmx-monitoring", - Port: 7199, - }, - { - Name: "cql", - Port: 9042, - }, - { - Name: "thrift", - Port: 9160, - }, - { - Name: "cql-ssl", - Port: 9142, - }, - }, - PublishNotReadyAddresses: true, - }, - } -} diff --git a/pkg/operator/cassandra/controller/sync.go b/pkg/operator/cassandra/controller/sync.go deleted file mode 100644 index 119c4269d0a0..000000000000 --- a/pkg/operator/cassandra/controller/sync.go +++ /dev/null @@ -1,115 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - "github.com/rook/rook/pkg/operator/cassandra/controller/util" - corev1 "k8s.io/api/core/v1" -) - -const ( - // SuccessSynced is used as part of the Event 'reason' when a Cluster is - // synced. - SuccessSynced = "Synced" - // ErrSyncFailed is used as part of the Event 'reason' when a - // Cluster fails to sync due to a resource of the same name already - // existing. - ErrSyncFailed = "ErrSyncFailed" - - MessageRackCreated = "Rack %s created" - MessageRackScaledUp = "Rack %s scaled up to %d members" - MessageRackScaleDownInProgress = "Rack %s scaling down to %d members" - MessageRackScaledDown = "Rack %s scaled down to %d members" - - // Messages to display when experiencing an error. 
- MessageHeadlessServiceSyncFailed = "Failed to sync Headless Service for cluster" - MessageMemberServicesSyncFailed = "Failed to sync MemberServices for cluster" - MessageUpdateStatusFailed = "Failed to update status for cluster" - MessageCleanupFailed = "Failed to clean up cluster resources" - MessageClusterSyncFailed = "Failed to sync cluster" -) - -// Sync attempts to sync the given Cassandra Cluster. -// NOTE: the Cluster Object is a DeepCopy. Modify at will. -func (cc *ClusterController) Sync(c *cassandrav1alpha1.Cluster) error { - - // Before syncing, ensure that all StatefulSets are up-to-date - stale, err := util.StatefulSetStatusesStale(c, cc.statefulSetLister) - if err != nil { - return err - } - if stale { - return nil - } - - // Cleanup Cluster resources - if err := cc.cleanup(c); err != nil { - cc.recorder.Event( - c, - corev1.EventTypeWarning, - ErrSyncFailed, - MessageCleanupFailed, - ) - } - - // Sync Headless Service for Cluster - if err := cc.syncClusterHeadlessService(c); err != nil { - cc.recorder.Event( - c, - corev1.EventTypeWarning, - ErrSyncFailed, - MessageHeadlessServiceSyncFailed, - ) - return err - } - - // Sync Cluster Member Services - if err := cc.syncMemberServices(c); err != nil { - cc.recorder.Event( - c, - corev1.EventTypeWarning, - ErrSyncFailed, - MessageMemberServicesSyncFailed, - ) - return err - } - - // Update Status - if err := cc.updateStatus(c); err != nil { - cc.recorder.Event( - c, - corev1.EventTypeWarning, - ErrSyncFailed, - MessageUpdateStatusFailed, - ) - return err - } - - // Sync Cluster - if err := cc.syncCluster(c); err != nil { - cc.recorder.Event( - c, - corev1.EventTypeWarning, - ErrSyncFailed, - MessageClusterSyncFailed, - ) - return err - } - - return nil -} diff --git a/pkg/operator/cassandra/controller/util/labels.go b/pkg/operator/cassandra/controller/util/labels.go deleted file mode 100644 index fd1b3309718e..000000000000 --- a/pkg/operator/cassandra/controller/util/labels.go +++ /dev/null @@ -1,91 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - "github.com/rook/rook/pkg/operator/cassandra/constants" - appsv1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/labels" -) - -// ClusterLabels returns a map of label keys and values -// for the given Cluster. -func ClusterLabels(c *cassandrav1alpha1.Cluster) map[string]string { - labels := recommendedLabels() - labels[constants.ClusterNameLabel] = c.Name - return labels -} - -// DatacenterLabels returns a map of label keys and values -// for the given Datacenter. -func DatacenterLabels(c *cassandrav1alpha1.Cluster) map[string]string { - recLabels := recommendedLabels() - dcLabels := ClusterLabels(c) - dcLabels[constants.DatacenterNameLabel] = c.Spec.Datacenter.Name - - return mergeLabels(dcLabels, recLabels) -} - -// RackLabels returns a map of label keys and values -// for the given Rack. 
-func RackLabels(r cassandrav1alpha1.RackSpec, c *cassandrav1alpha1.Cluster) map[string]string { - recLabels := recommendedLabels() - rackLabels := DatacenterLabels(c) - rackLabels[constants.RackNameLabel] = r.Name - - return mergeLabels(rackLabels, recLabels) -} - -// StatefulSetPodLabel returns a map of labels to uniquely -// identify a StatefulSet Pod with the given name -func StatefulSetPodLabel(name string) map[string]string { - return map[string]string{ - appsv1.StatefulSetPodNameLabel: name, - } -} - -// RackSelector returns a LabelSelector for the given rack. -func RackSelector(r cassandrav1alpha1.RackSpec, c *cassandrav1alpha1.Cluster) labels.Selector { - - rackLabelsSet := labels.Set(RackLabels(r, c)) - sel := labels.SelectorFromSet(rackLabelsSet) - - return sel -} - -func recommendedLabels() map[string]string { - - return map[string]string{ - "app": constants.AppName, - - "app.kubernetes.io/name": constants.AppName, - "app.kubernetes.io/managed-by": constants.OperatorAppName, - } -} - -func mergeLabels(l1, l2 map[string]string) map[string]string { - - res := make(map[string]string) - for k, v := range l1 { - res[k] = v - } - for k, v := range l2 { - res[k] = v - } - return res -} diff --git a/pkg/operator/cassandra/controller/util/patch.go b/pkg/operator/cassandra/controller/util/patch.go deleted file mode 100644 index 80c2f0d10c5a..000000000000 --- a/pkg/operator/cassandra/controller/util/patch.go +++ /dev/null @@ -1,102 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "context" - "encoding/json" - - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - "github.com/rook/rook/pkg/client/clientset/versioned" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/strategicpatch" - "k8s.io/client-go/kubernetes" -) - -// PatchService patches the old Service so that it matches the -// new Service. -func PatchService(old, new *corev1.Service, kubeClient kubernetes.Interface) error { - ctx := context.TODO() - oldJSON, err := json.Marshal(old) - if err != nil { - return err - } - - newJSON, err := json.Marshal(new) - if err != nil { - return err - } - - patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldJSON, newJSON, corev1.Service{}) - if err != nil { - return err - } - - _, err = kubeClient.CoreV1().Services(old.Namespace).Patch(ctx, old.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) - return err -} - -// PatchStatefulSet patches the old StatefulSet so that it matches the -// new StatefulSet. 
-func PatchStatefulSet(old, new *appsv1.StatefulSet, kubeClient kubernetes.Interface) error { - ctx := context.TODO() - oldJSON, err := json.Marshal(old) - if err != nil { - return err - } - - newJSON, err := json.Marshal(new) - if err != nil { - return err - } - - patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldJSON, newJSON, appsv1.StatefulSet{}) - if err != nil { - return err - } - - _, err = kubeClient.AppsV1().StatefulSets(old.Namespace).Patch(ctx, old.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) - return err -} - -// PatchCluster patches the old Cluster so that it matches the new Cluster. -func PatchClusterStatus(c *cassandrav1alpha1.Cluster, rookClient versioned.Interface) error { - ctx := context.TODO() - // JSON Patch RFC 6902 - patch := []struct { - Op string `json:"op"` - Path string `json:"path"` - Value cassandrav1alpha1.ClusterStatus `json:"value"` - }{ - { - Op: "add", - Path: "/status", - Value: c.Status, - }, - } - - patchBytes, err := json.Marshal(patch) - if err != nil { - return err - } - _, err = rookClient.CassandraV1alpha1().Clusters(c.Namespace).Patch(ctx, c.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{}) - return err - -} diff --git a/pkg/operator/cassandra/controller/util/resource.go b/pkg/operator/cassandra/controller/util/resource.go deleted file mode 100644 index f60016bc26ea..000000000000 --- a/pkg/operator/cassandra/controller/util/resource.go +++ /dev/null @@ -1,348 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
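Editor's note: the PatchClusterStatus helper removed above updates only the /status subpath with a single JSON Patch (RFC 6902) "add" operation. The following standalone sketch shows how such a patch document is assembled and marshalled; the status payload and field names here are illustrative stand-ins, not the operator's ClusterStatus type.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// jsonPatchOp is one RFC 6902 operation; the removed PatchClusterStatus
// builds a single "add" op targeting /status in the same shape.
type jsonPatchOp struct {
	Op    string      `json:"op"`
	Path  string      `json:"path"`
	Value interface{} `json:"value"`
}

func main() {
	// Illustrative status payload; the real helper marshals a ClusterStatus.
	status := map[string]interface{}{"racks": map[string]int{"test-rack": 3}}

	patch := []jsonPatchOp{{Op: "add", Path: "/status", Value: status}}
	patchBytes, err := json.Marshal(patch)
	if err != nil {
		panic(err)
	}
	// This byte slice is what would be sent with types.JSONPatchType.
	fmt.Println(string(patchBytes))
}
```

By contrast, the PatchService and PatchStatefulSet helpers above compute a two-way strategic merge patch from the marshalled old and new objects and send it with types.StrategicMergePatchType, so only the fields that changed are applied.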
-*/ - -package util - -import ( - "fmt" - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - "github.com/rook/rook/pkg/operator/cassandra/constants" - "github.com/rook/rook/pkg/operator/k8sutil" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" -) - -func StatefulSetNameForRack(r cassandrav1alpha1.RackSpec, c *cassandrav1alpha1.Cluster) string { - return fmt.Sprintf("%s-%s-%s", c.Name, c.Spec.Datacenter.Name, r.Name) -} - -func ServiceAccountNameForMembers(c *cassandrav1alpha1.Cluster) string { - return fmt.Sprintf("%s-member", c.Name) -} - -func HeadlessServiceNameForCluster(c *cassandrav1alpha1.Cluster) string { - return fmt.Sprintf("%s-client", c.Name) -} - -func ImageForCluster(c *cassandrav1alpha1.Cluster) string { - - var repo string - - switch c.Spec.Mode { - case cassandrav1alpha1.ClusterModeScylla: - repo = "scylladb/scylla" - default: - repo = "cassandra" - } - - if c.Spec.Repository != nil { - repo = *c.Spec.Repository - } - return fmt.Sprintf("%s:%s", repo, c.Spec.Version) -} - -func StatefulSetForRack(r cassandrav1alpha1.RackSpec, c *cassandrav1alpha1.Cluster, rookImage string) *appsv1.StatefulSet { - - rackLabels := RackLabels(r, c) - stsName := StatefulSetNameForRack(r, c) - - return &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: stsName, - Namespace: c.Namespace, - Labels: rackLabels, - OwnerReferences: []metav1.OwnerReference{NewControllerRef(c)}, - }, - Spec: appsv1.StatefulSetSpec{ - Replicas: RefFromInt32(0), - // Use a common Headless Service for all StatefulSets - ServiceName: HeadlessServiceNameForCluster(c), - Selector: &metav1.LabelSelector{ - MatchLabels: rackLabels, - }, - PodManagementPolicy: appsv1.OrderedReadyPodManagement, - UpdateStrategy: appsv1.StatefulSetUpdateStrategy{ - Type: appsv1.RollingUpdateStatefulSetStrategyType, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: rackLabels, - Annotations: map[string]string{ - "prometheus.io/scrape": "true", - "prometheus.io/port": "9180", - }, - }, - Spec: corev1.PodSpec{ - Volumes: volumesForRack(r), - InitContainers: []corev1.Container{ - { - Name: "rook-install", - Image: rookImage, - ImagePullPolicy: "IfNotPresent", - Command: []string{ - "/bin/sh", - "-c", - fmt.Sprintf("cp -a /sidecar/* %s", constants.SharedDirName), - }, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "shared", - MountPath: constants.SharedDirName, - ReadOnly: false, - }, - }, - }, - }, - Containers: []corev1.Container{ - { - Name: "cassandra", - Image: ImageForCluster(c), - ImagePullPolicy: "IfNotPresent", - Ports: []corev1.ContainerPort{ - { - Name: "intra-node", - ContainerPort: 7000, - }, - { - Name: "tls-intra-node", - ContainerPort: 7001, - }, - { - Name: "jmx", - ContainerPort: 7199, - }, - { - Name: "cql", - ContainerPort: 9042, - }, - { - Name: "thrift", - ContainerPort: 9160, - }, - { - Name: "jolokia", - ContainerPort: 8778, - }, - { - Name: "prometheus", - ContainerPort: 9180, - }, - }, - // TODO: unprivileged entrypoint - Command: []string{ - fmt.Sprintf("%s/tini", constants.SharedDirName), - "--", - fmt.Sprintf("%s/rook", constants.SharedDirName), - }, - Args: []string{ - "cassandra", - "sidecar", - }, - Env: []corev1.EnvVar{ - { - Name: constants.PodIPEnvVar, - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "status.podIP", - }, - }, - }, - { - Name: k8sutil.PodNameEnvVar, 
- ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "metadata.name", - }, - }, - }, - { - Name: k8sutil.PodNamespaceEnvVar, - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "metadata.namespace", - }, - }, - }, - { - Name: constants.ResourceLimitCPUEnvVar, - ValueFrom: &corev1.EnvVarSource{ - ResourceFieldRef: &corev1.ResourceFieldSelector{ - ContainerName: "cassandra", - Resource: "limits.cpu", - Divisor: resource.MustParse("1"), - }, - }, - }, - { - Name: constants.ResourceLimitMemoryEnvVar, - ValueFrom: &corev1.EnvVarSource{ - ResourceFieldRef: &corev1.ResourceFieldSelector{ - ContainerName: "cassandra", - Resource: "limits.memory", - Divisor: resource.MustParse("1Mi"), - }, - }, - }, - }, - Resources: r.Resources, - VolumeMounts: volumeMountsForRack(r, c), - LivenessProbe: &corev1.Probe{ - // Initial delay should be big, because scylla runs benchmarks - // to tune the IO settings. - InitialDelaySeconds: int32(400), - TimeoutSeconds: int32(5), - // TODO: Investigate if it's ok to call status every 10 seconds - PeriodSeconds: int32(10), - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(constants.ProbePort), - Path: constants.LivenessProbePath, - }, - }, - }, - ReadinessProbe: &corev1.Probe{ - InitialDelaySeconds: int32(15), - TimeoutSeconds: int32(5), - // TODO: Investigate if it's ok to call status every 10 seconds - PeriodSeconds: int32(10), - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(constants.ProbePort), - Path: constants.ReadinessProbePath, - }, - }, - }, - // Before a Cassandra Pod is stopped, execute nodetool drain to - // flush the memtable to disk and stop listening for connections. - // This is necessary to ensure we don't lose any data. - Lifecycle: &corev1.Lifecycle{ - PreStop: &corev1.Handler{ - Exec: &corev1.ExecAction{ - Command: []string{ - "nodetool", - "drain", - }, - }, - }, - }, - }, - }, - // Set GracePeriod to 2 days, should be enough even for the slowest of systems - TerminationGracePeriodSeconds: RefFromInt64(200000), - ServiceAccountName: ServiceAccountNameForMembers(c), - Affinity: affinityForRack(r), - Tolerations: tolerationsForRack(r), - }, - }, - VolumeClaimTemplates: volumeClaimTemplatesForRack(r.Storage.VolumeClaimTemplates), - }, - } -} - -// TODO: Maybe move this logic to a defaulter -func volumeClaimTemplatesForRack(claims []corev1.PersistentVolumeClaim) []corev1.PersistentVolumeClaim { - - if len(claims) == 0 { - return claims - } - - for i := range claims { - claims[i].Spec.AccessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce} - } - return claims -} - -// GetDataDir returns the directory used to store the database data -func GetDataDir(c *cassandrav1alpha1.Cluster) string { - if c.Spec.Mode == cassandrav1alpha1.ClusterModeScylla { - return constants.DataDirScylla - } - return constants.DataDirCassandra -} - -// volumeMountsForRack returns the VolumeMounts for that a Pod of the -// specific rack should have. Currently, it only supports 1 volume. -// If the user has specified more than 1 volumes, it only uses the -// first one. 
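Editor's note: the StatefulSet template above surfaces the cassandra container's own CPU and memory limits to the sidecar through Downward API resourceFieldRef environment variables, which the config generation later parses for heap sizing. A small sketch of declaring one such variable with the Kubernetes API types follows; it assumes the same k8s.io/api and k8s.io/apimachinery modules the deleted code imports, and the env var name is a placeholder rather than the operator's constant.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Exposes the "cassandra" container's memory limit, expressed in MiB
	// (Divisor "1Mi"), as an environment variable inside the pod.
	memLimitEnv := corev1.EnvVar{
		Name: "MEMORY_LIMIT_MIB", // placeholder name for illustration
		ValueFrom: &corev1.EnvVarSource{
			ResourceFieldRef: &corev1.ResourceFieldSelector{
				ContainerName: "cassandra",
				Resource:      "limits.memory",
				Divisor:       resource.MustParse("1Mi"),
			},
		},
	}
	fmt.Printf("%+v\n", memLimitEnv)
}
```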
-// TODO: Modify to handle JBOD -func volumeMountsForRack(r cassandrav1alpha1.RackSpec, c *cassandrav1alpha1.Cluster) []corev1.VolumeMount { - - vm := []corev1.VolumeMount{ - { - Name: "shared", - MountPath: constants.SharedDirName, - ReadOnly: true, - }, - } - if r.JMXExporterConfigMapName != nil && *r.JMXExporterConfigMapName != "" { - vm = append(vm, corev1.VolumeMount{ - Name: "jmx-config", - MountPath: "/etc/cassandra/jmx_exporter_config.yaml", - SubPath: "jmx_exporter_config.yaml", - }) - } - if len(r.Storage.VolumeClaimTemplates) > 0 { - vm = append(vm, corev1.VolumeMount{ - Name: r.Storage.VolumeClaimTemplates[0].Name, - MountPath: GetDataDir(c), - }) - } - return vm -} - -func volumesForRack(r cassandrav1alpha1.RackSpec) []corev1.Volume { - volumes := []corev1.Volume{ - { - Name: "shared", - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, - }, - }, - } - if r.JMXExporterConfigMapName != nil && *r.JMXExporterConfigMapName != "" { - volumes = append(volumes, corev1.Volume{ - Name: "jmx-config", - VolumeSource: corev1.VolumeSource{ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{Name: *r.JMXExporterConfigMapName}, - }}, - }) - } - return volumes -} - -func tolerationsForRack(r cassandrav1alpha1.RackSpec) []corev1.Toleration { - - if r.Placement == nil { - return nil - } - return r.Placement.Tolerations -} - -func affinityForRack(r cassandrav1alpha1.RackSpec) *corev1.Affinity { - - if r.Placement == nil { - return nil - } - - return &corev1.Affinity{ - PodAffinity: r.Placement.PodAffinity, - PodAntiAffinity: r.Placement.PodAntiAffinity, - NodeAffinity: r.Placement.NodeAffinity, - } -} diff --git a/pkg/operator/cassandra/controller/util/util.go b/pkg/operator/cassandra/controller/util/util.go deleted file mode 100644 index 2c6ed7b4b0f4..000000000000 --- a/pkg/operator/cassandra/controller/util/util.go +++ /dev/null @@ -1,214 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package util - -import ( - "fmt" - "strconv" - "strings" - - cassandrarookio "github.com/rook/rook/pkg/apis/cassandra.rook.io" - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - "github.com/rook/rook/pkg/operator/cassandra/constants" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/selection" - "k8s.io/client-go/kubernetes" - appslisters "k8s.io/client-go/listers/apps/v1" - corelisters "k8s.io/client-go/listers/core/v1" -) - -// GetPodsForCluster returns the existing Pods for -// the given cluster -func GetPodsForCluster(cluster *cassandrav1alpha1.Cluster, podLister corelisters.PodLister) ([]*corev1.Pod, error) { - - clusterRequirement, err := labels.NewRequirement(constants.ClusterNameLabel, selection.Equals, []string{cluster.Name}) - if err != nil { - return nil, fmt.Errorf("error trying to create clusterRequirement: %s", err.Error()) - } - clusterSelector := labels.NewSelector().Add(*clusterRequirement) - return podLister.Pods(cluster.Namespace).List(clusterSelector) - -} - -// GetMemberServicesForRack returns the member services for the given rack. -func GerMemberServicesForRack( - r cassandrav1alpha1.RackSpec, - c *cassandrav1alpha1.Cluster, - serviceLister corelisters.ServiceLister, -) ([]*corev1.Service, error) { - - sel := RackSelector(r, c) - return serviceLister.Services(c.Namespace).List(sel) -} - -// GetPodsForRack returns the created Pods for the given rack. -func GetPodsForRack( - r cassandrav1alpha1.RackSpec, - c *cassandrav1alpha1.Cluster, - podLister corelisters.PodLister, -) ([]*corev1.Pod, error) { - - sel := RackSelector(r, c) - return podLister.Pods(c.Namespace).List(sel) - -} - -// VerifyOwner checks if the owner Object is the controller -// of the obj Object and returns an error if it isn't. -func VerifyOwner(obj, owner metav1.Object) error { - if !metav1.IsControlledBy(obj, owner) { - ownerRef := metav1.GetControllerOf(obj) - return fmt.Errorf( - "'%s/%s' is foreign owned: "+ - "it is owned by '%v', not '%s/%s'.", - obj.GetNamespace(), obj.GetName(), - ownerRef, - owner.GetNamespace(), owner.GetName(), - ) - } - return nil -} - -// NewControllerRef returns an OwnerReference to -// the provided Cluster Object -func NewControllerRef(c *cassandrav1alpha1.Cluster) metav1.OwnerReference { - return *metav1.NewControllerRef(c, schema.GroupVersionKind{ - Group: cassandrarookio.CustomResourceGroupName, - Version: "v1alpha1", - Kind: "Cluster", - }) -} - -// RefFromString is a helper function that takes a string -// and outputs a reference to that string. -// Useful for initializing a string pointer from a literal. -func RefFromString(s string) *string { - return &s -} - -// RefFromInt is a helper function that takes a int -// and outputs a reference to that int. -// Useful for initializing an int pointer from a literal. -func RefFromInt(i int32) *int32 { - return &i -} - -// IndexFromName attempts to get the index from a name using the -// naming convention -. 
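Editor's note: GetPodsForCluster above narrows the pod list with a label requirement on the cluster-name label before asking the lister. The sketch below shows how such a selector is built and evaluated with apimachinery; the label key and values are illustrative examples, not necessarily the operator's constants.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
)

func main() {
	// Require cluster=test-cluster, mirroring the requirement that
	// GetPodsForCluster adds before listing pods in the namespace.
	req, err := labels.NewRequirement("cassandra.rook.io/cluster", selection.Equals, []string{"test-cluster"})
	if err != nil {
		panic(err)
	}
	sel := labels.NewSelector().Add(*req)

	podLabels := labels.Set{
		"cassandra.rook.io/cluster": "test-cluster",
		"app":                       "rook-cassandra",
	}
	fmt.Println(sel.String(), "matches:", sel.Matches(podLabels)) // matches: true
}
```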
-func IndexFromName(n string) (int32, error) { - - // index := svc.Name[strings.LastIndex(svc.Name, "-") + 1 : len(svc.Name)] - delimIndex := strings.LastIndex(n, "-") - if delimIndex == -1 { - return -1, fmt.Errorf("couldn't get index from name %s", n) - } - - // #nosec G109 using Atoi to convert type into int is not a real risk - index, err := strconv.Atoi(n[delimIndex+1:]) - if err != nil { - return -1, fmt.Errorf("couldn't get index from name %s", n) - } - - return int32(index), nil -} - -// isPodUnschedulable iterates a Pod's Status.Conditions to find out -// if it has been deemed unschedulable -func IsPodUnschedulable(pod *corev1.Pod) bool { - for _, v := range pod.Status.Conditions { - if v.Reason == corev1.PodReasonUnschedulable { - return true - } - } - return false -} - -// RefFromInt32 is a helper function that takes a int32 -// and outputs a reference to that int. -func RefFromInt32(i int32) *int32 { - return &i -} - -// RefFromInt64 is a helper function that takes a int64 -// and outputs a reference to that int. -func RefFromInt64(i int64) *int64 { - return &i -} - -// Max returns the bigger of two given numbers -func Max(x, y int64) int64 { - if x < y { - return y - } - return x -} - -// Min returns the smaller of two given numbers -func Min(x, y int64) int64 { - if x < y { - return x - } - return y -} - -// ScaleStatefulSet attempts to scale a StatefulSet by the given amount -func ScaleStatefulSet(sts *appsv1.StatefulSet, amount int32, kubeClient kubernetes.Interface) error { - updatedSts := sts.DeepCopy() - updatedReplicas := *updatedSts.Spec.Replicas + amount - if updatedReplicas < 0 { - return fmt.Errorf("error, can't scale statefulset below 0 replicas") - } - updatedSts.Spec.Replicas = &updatedReplicas - err := PatchStatefulSet(sts, updatedSts, kubeClient) - return err -} - -// IsRackConditionTrue checks a rack's status for the presence of a condition type -// and checks if it is true. -func IsRackConditionTrue(rackStatus *cassandrav1alpha1.RackStatus, condType cassandrav1alpha1.RackConditionType) bool { - for _, cond := range rackStatus.Conditions { - if cond.Type == cassandrav1alpha1.RackConditionTypeMemberLeaving && cond.Status == cassandrav1alpha1.ConditionTrue { - return true - } - } - return false -} - -// StatefulSetStatusesStale checks if the StatefulSet Objects of a Cluster -// have been observed by the StatefulSet controller. -// If they haven't, their status might be stale, so it's better to wait -// and process them later. -func StatefulSetStatusesStale(c *cassandrav1alpha1.Cluster, statefulSetLister appslisters.StatefulSetLister) (bool, error) { - // Before proceeding, ensure all the Statefulset Statuses are valid - for _, r := range c.Spec.Datacenter.Racks { - if _, ok := c.Status.Racks[r.Name]; !ok { - continue - } - sts, err := statefulSetLister.StatefulSets(c.Namespace).Get(StatefulSetNameForRack(r, c)) - if err != nil { - return true, fmt.Errorf("error getting statefulset: %s", err.Error()) - } - if sts.Generation != sts.Status.ObservedGeneration { - return true, nil - } - } - return false, nil -} diff --git a/pkg/operator/cassandra/sidecar/checks.go b/pkg/operator/cassandra/sidecar/checks.go deleted file mode 100644 index 10874d4a3090..000000000000 --- a/pkg/operator/cassandra/sidecar/checks.go +++ /dev/null @@ -1,95 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package sidecar - -import ( - "fmt" - "github.com/rook/rook/pkg/operator/cassandra/constants" - "github.com/yanniszark/go-nodetool/nodetool" - "net/http" -) - -// setupHTTPChecks brings up the liveness and readiness probes -func (m *MemberController) setupHTTPChecks() error { - - http.HandleFunc(constants.LivenessProbePath, livenessCheck(m)) - http.HandleFunc(constants.ReadinessProbePath, readinessCheck(m)) - - err := http.ListenAndServe(fmt.Sprintf("0.0.0.0:%d", constants.ProbePort), nil) - // If ListenAndServe returns, something went wrong - m.logger.Fatalf("Error in HTTP checks: %s", err.Error()) - return err - -} - -func livenessCheck(m *MemberController) func(http.ResponseWriter, *http.Request) { - - return func(w http.ResponseWriter, req *http.Request) { - - status := http.StatusOK - - // Check if JMX is reachable - _, err := m.nodetool.Status() - if err != nil { - m.logger.Errorf("Liveness check failed with error: %s", err.Error()) - status = http.StatusServiceUnavailable - } - - w.WriteHeader(status) - - } -} - -func readinessCheck(m *MemberController) func(http.ResponseWriter, *http.Request) { - - return func(w http.ResponseWriter, req *http.Request) { - - status := http.StatusOK - - err := func() error { - // Contact Cassandra to learn about the status of the member - HostIDMap, err := m.nodetool.Status() - if err != nil { - return fmt.Errorf("Error while executing nodetool status in readiness check: %s", err.Error()) - } - // Get local node through static ip - localNode, ok := HostIDMap[m.ip] - if !ok { - return fmt.Errorf("Couldn't find node with ip %s in nodetool status.", m.ip) - } - // Check local node status - // Up means the member is alive - if localNode.Status != nodetool.NodeStatusUp { - return fmt.Errorf("Unexpected local node status: %s", localNode.Status) - } - // Check local node state - // Normal means that the member has completed bootstrap and joined the cluster - if localNode.State != nodetool.NodeStateNormal { - return fmt.Errorf("Unexpected local node state: %s", localNode.State) - } - return nil - }() - - if err != nil { - m.logger.Errorf("Readiness check failed with error: %s", err.Error()) - status = http.StatusServiceUnavailable - } - - w.WriteHeader(status) - } - -} diff --git a/pkg/operator/cassandra/sidecar/config.go b/pkg/operator/cassandra/sidecar/config.go deleted file mode 100644 index bb8060330edb..000000000000 --- a/pkg/operator/cassandra/sidecar/config.go +++ /dev/null @@ -1,450 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
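Editor's note: the sidecar's liveness and readiness handlers above reduce to "probe the daemon, answer 200 or 503". A minimal net/http sketch of that pattern follows; the checker function, port, and path are stand-ins rather than the sidecar's nodetool-backed checks and probe constants.

```go
package main

import (
	"log"
	"net/http"
)

// healthHandler returns 200 when check() succeeds and 503 otherwise,
// mirroring the shape of the sidecar's liveness/readiness handlers.
func healthHandler(check func() error) http.HandlerFunc {
	return func(w http.ResponseWriter, _ *http.Request) {
		if err := check(); err != nil {
			log.Printf("health check failed: %v", err)
			w.WriteHeader(http.StatusServiceUnavailable)
			return
		}
		w.WriteHeader(http.StatusOK)
	}
}

func main() {
	// Stand-in check; the real sidecar asks nodetool for node status and state.
	alwaysHealthy := func() error { return nil }
	http.HandleFunc("/healthz", healthHandler(alwaysHealthy))
	log.Fatal(http.ListenAndServe("0.0.0.0:8080", nil))
}
```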
-*/ - -package sidecar - -import ( - "context" - "fmt" - "io/ioutil" - "os" - "strconv" - "strings" - "time" - - "github.com/ghodss/yaml" - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - "github.com/rook/rook/pkg/operator/cassandra/constants" - "github.com/rook/rook/pkg/operator/cassandra/controller/util" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - // Cassandra-Specific - configDirCassandra = "/etc/cassandra" - cassandraYAMLPath = configDirCassandra + "/" + "cassandra.yaml" - cassandraEnvPath = configDirCassandra + "/" + "cassandra-env.sh" - cassandraRackDCPropertiesPath = configDirCassandra + "/" + "cassandra-rackdc.properties" - - // Scylla-Specific - configDirScylla = "/etc/scylla" - scyllaYAMLPath = configDirScylla + "/" + "scylla.yaml" - scyllaRackDCPropertiesPath = configDirScylla + "/" + "cassandra-rackdc.properties" - scyllaJMXPath = "/usr/lib/scylla/jmx/scylla-jmx" - - // Common - jolokiaPath = constants.PluginDirName + "/" + "jolokia.jar" - - jmxExporterPath = constants.PluginDirName + "/" + "jmx_prometheus.jar" - jmxExporterConfigPath = configDirCassandra + "/" + "jmx_exporter_config.yaml" - jmxExporterPort = "9180" - - entrypointPath = "/entrypoint.sh" - rackDCPropertiesFormat = "dc=%s" + "\n" + "rack=%s" + "\n" + "prefer_local=false" + "\n" -) - -// generateConfigFiles injects the default configuration files -// with our custom values. -func (m *MemberController) generateConfigFiles() error { - - var err error - m.logger.Info("Generating config files") - - if m.mode == cassandrav1alpha1.ClusterModeScylla { - err = m.generateScyllaConfigFiles() - } else { - err = m.generateCassandraConfigFiles() - } - - return err -} - -// generateCassandraConfigFiles generates the necessary config files for Cassandra. 
-// Currently, those are: -// - cassandra.yaml -// - cassandra-env.sh -// - cassandra-rackdc.properties -// - entrypoint-sh -func (m *MemberController) generateCassandraConfigFiles() error { - - ///////////////////////////// - // Generate cassandra.yaml // - ///////////////////////////// - - // Read default cassandra.yaml - cassandraYAML, err := ioutil.ReadFile(cassandraYAMLPath) - if err != nil { - return fmt.Errorf("unexpected error trying to open cassandra.yaml: %s", err.Error()) - } - - customCassandraYAML, err := m.overrideConfigValues(cassandraYAML) - if err != nil { - return fmt.Errorf("error trying to override config values: %s", err.Error()) - } - - // Write result to file - if err = ioutil.WriteFile(cassandraYAMLPath, customCassandraYAML, os.ModePerm); err != nil { - m.logger.Errorf("error trying to write cassandra.yaml: %s", err.Error()) - return err - } - - ////////////////////////////////////////// - // Generate cassandra-rackdc.properties // - ////////////////////////////////////////// - - rackdcProperties := []byte(fmt.Sprintf(rackDCPropertiesFormat, m.datacenter, m.rack)) - if err = ioutil.WriteFile(cassandraRackDCPropertiesPath, rackdcProperties, os.ModePerm); err != nil { - return fmt.Errorf("error trying to write cassandra-rackdc.properties: %s", err.Error()) - } - - ///////////////////////////////////////// - // Generate cassandra-env.sh // - ///////////////////////////////////////// - - cassandraEnv, err := ioutil.ReadFile(cassandraEnvPath) - if err != nil { - return fmt.Errorf("error trying to open cassandra-env.sh, %s", err.Error()) - } - - // Calculate heap sizes - // https://github.com/apache/cassandra/blob/521542ff26f9482b733e4f0f86281f07c3af29da/conf/cassandra-env.sh - cpu := os.Getenv(constants.ResourceLimitCPUEnvVar) - if cpu == "" { - return fmt.Errorf("%s env variable not found", constants.ResourceLimitCPUEnvVar) - } - cpuNumber, _ := strconv.ParseInt(cpu, 10, 64) - mem := os.Getenv(constants.ResourceLimitMemoryEnvVar) - if mem == "" { - return fmt.Errorf("%s env variable not found", constants.ResourceLimitMemoryEnvVar) - } - memNumber, _ := strconv.ParseInt(mem, 10, 64) - maxHeapSize := util.Max(util.Min(memNumber/2, 1024), util.Min(memNumber/4, 8192)) - heapNewSize := util.Min(maxHeapSize/4, 100*cpuNumber) - if err := os.Setenv("MAX_HEAP_SIZE", fmt.Sprintf("%dM", maxHeapSize)); err != nil { - return fmt.Errorf("error setting MAX_HEAP_SIZE: %s", err.Error()) - } - if err := os.Setenv("HEAP_NEWSIZE", fmt.Sprintf("%dM", heapNewSize)); err != nil { - return fmt.Errorf("error setting HEAP_NEWSIZE: %s", err.Error()) - } - - // Generate jmx_agent_config - jmxConfig := "" - if _, err := os.Stat(jmxExporterConfigPath); !os.IsNotExist(err) { - jmxConfig = getJmxExporterConfig() - } - - agentsConfig := []byte(fmt.Sprintf(`JVM_OPTS="$JVM_OPTS %s %s"`, getJolokiaConfig(), jmxConfig)) - - err = ioutil.WriteFile(cassandraEnvPath, append(cassandraEnv, agentsConfig...), os.ModePerm) - if err != nil { - return fmt.Errorf("error trying to write cassandra-env.sh: %s", err.Error()) - } - - //////////////////////////// - // Generate entrypoint.sh // - //////////////////////////// - - entrypoint := "#!/bin/sh" + "\n" + "exec cassandra -f -R" - if err := ioutil.WriteFile(entrypointPath, []byte(entrypoint), os.ModePerm); err != nil { - return fmt.Errorf("error trying to write cassandra entrypoint: %s", err.Error()) - } - - return nil - -} - -// generateScyllaConfigFiles generates the necessary config files for Scylla. 
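Editor's note: the cassandra-env.sh generation above derives heap sizes from the container limits with the stock Cassandra heuristic, max heap = max(min(mem/2, 1024), min(mem/4, 8192)) MiB and new-gen heap = min(maxHeap/4, 100 * cores) MiB. A dependency-free sketch of that arithmetic, using illustrative limits rather than real env var values:

```go
package main

import "fmt"

func minI64(x, y int64) int64 {
	if x < y {
		return x
	}
	return y
}

func maxI64(x, y int64) int64 {
	if x > y {
		return x
	}
	return y
}

func main() {
	// Illustrative container limits: 4096 MiB of memory and 2 CPU cores,
	// i.e. the kind of values the sidecar reads from its resource-limit env vars.
	memMiB, cpuCores := int64(4096), int64(2)

	maxHeap := maxI64(minI64(memMiB/2, 1024), minI64(memMiB/4, 8192)) // 1024
	newHeap := minI64(maxHeap/4, 100*cpuCores)                        // 200

	fmt.Printf("MAX_HEAP_SIZE=%dM HEAP_NEWSIZE=%dM\n", maxHeap, newHeap)
}
```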
-// Currently, those are: -// - scylla.yaml -// - cassandra-rackdc.properties -// - scylla-jmx -// - entrypoint.sh -func (m *MemberController) generateScyllaConfigFiles() error { - - // TODO: remove scylla.yaml gen once the entrypoint script in scylla gets - // the necessary options - - ///////////////////////////// - // Generate scylla.yaml // - ///////////////////////////// - - // Read default scylla.yaml - scyllaYAML, err := ioutil.ReadFile(scyllaYAMLPath) - if err != nil { - return fmt.Errorf("unexpected error trying to open scylla.yaml: %s", err.Error()) - } - - customScyllaYAML, err := m.overrideConfigValues(scyllaYAML) - if err != nil { - return fmt.Errorf("error trying to override config values: %s", err.Error()) - } - - // Write result to file - if err = ioutil.WriteFile(scyllaYAMLPath, customScyllaYAML, os.ModePerm); err != nil { - m.logger.Errorf("error trying to write scylla.yaml: %s", err.Error()) - return err - } - - ////////////////////////////////////////// - // Generate cassandra-rackdc.properties // - ////////////////////////////////////////// - - rackdcProperties := []byte(fmt.Sprintf(rackDCPropertiesFormat, m.datacenter, m.rack)) - if err := ioutil.WriteFile(scyllaRackDCPropertiesPath, rackdcProperties, os.ModePerm); err != nil { - return fmt.Errorf("error trying to write cassandra-rackdc.properties: %s", err.Error()) - } - - ///////////////////////////////////////// - // Edit scylla-jmx with jolokia option // - ///////////////////////////////////////// - - scyllaJMXBytes, err := ioutil.ReadFile(scyllaJMXPath) - if err != nil { - return fmt.Errorf("error reading scylla-jmx: %s", err.Error()) - } - scyllaJMX := string(scyllaJMXBytes) - splitIndex := strings.Index(scyllaJMX, `\`) + len(`\`) - m.logger.Infof("Split index = %d", splitIndex) - injectedLine := fmt.Sprintf("\n %s \\", getJolokiaConfig()) - scyllaJMXCustom := scyllaJMX[:splitIndex] + injectedLine + scyllaJMX[splitIndex:] - if err := ioutil.WriteFile(scyllaJMXPath, []byte(scyllaJMXCustom), os.ModePerm); err != nil { - return fmt.Errorf("error writing scylla-jmx: %s", err.Error()) - } - - //////////////////////////// - // Generate entrypoint.sh // - //////////////////////////// - - entrypoint, err := m.scyllaEntrypoint() - if err != nil { - return fmt.Errorf("error creating scylla entrypoint: %s", err.Error()) - } - - m.logger.Infof("Scylla entrypoint script:\n %s", entrypoint) - if err := ioutil.WriteFile(entrypointPath, []byte(entrypoint), os.ModePerm); err != nil { - return fmt.Errorf("error trying to write scylla entrypoint: %s", err.Error()) - } - - return nil -} - -// scyllaEntrypoint returns the entrypoint script for scylla -func (m *MemberController) scyllaEntrypoint() (string, error) { - ctx := context.TODO() - // Get seeds - seeds, err := m.getSeeds() - if err != nil { - return "", fmt.Errorf("error getting seeds: %s", err.Error()) - } - - // Get local ip - localIP := os.Getenv(constants.PodIPEnvVar) - if localIP == "" { - return "", fmt.Errorf("POD_IP environment variable not set") - } - - // See if we need to run in developer mode - devMode := "0" - c, err := m.rookClient.CassandraV1alpha1().Clusters(m.namespace).Get(ctx, m.cluster, metav1.GetOptions{}) - if err != nil { - return "", fmt.Errorf("error getting cluster: %s", err.Error()) - } - if val, ok := c.Annotations[constants.DeveloperModeAnnotation]; ok && val == constants.LabelValueTrue { - devMode = "1" - } - - // Get cpu cores - cpu := os.Getenv(constants.ResourceLimitCPUEnvVar) - if cpu == "" { - return "", fmt.Errorf("%s env variable not 
found", constants.ResourceLimitCPUEnvVar) - } - - // Get memory - mem := os.Getenv(constants.ResourceLimitMemoryEnvVar) - if mem == "" { - return "", fmt.Errorf("%s env variable not found", constants.ResourceLimitMemoryEnvVar) - } - // Leave some memory for other stuff - memNumber, _ := strconv.ParseInt(mem, 10, 64) - mem = fmt.Sprintf("%dM", util.Max(memNumber-700, 0)) - - opts := []struct { - flag, value string - }{ - { - flag: "listen-address", - value: localIP, - }, - { - flag: "broadcast-address", - value: m.ip, - }, - { - flag: "broadcast-rpc-address", - value: m.ip, - }, - { - flag: "seeds", - value: seeds, - }, - { - flag: "developer-mode", - value: devMode, - }, - { - flag: "smp", - value: cpu, - }, - { - flag: "memory", - value: mem, - }, - } - - entrypoint := "#!/bin/sh" + "\n" + "exec /docker-entrypoint.py" - for _, opt := range opts { - entrypoint = fmt.Sprintf("%s --%s %s", entrypoint, opt.flag, opt.value) - } - return entrypoint, nil -} - -// overrideConfigValues overrides the default config values with -// our custom values, for the fields that are of interest to us -func (m *MemberController) overrideConfigValues(configText []byte) ([]byte, error) { - - var config map[string]interface{} - - if err := yaml.Unmarshal(configText, &config); err != nil { - return nil, fmt.Errorf("error unmarshalling cassandra.yaml: %s", err.Error()) - } - - seeds, err := m.getSeeds() - if err != nil { - return nil, fmt.Errorf("error getting seeds: %s", err.Error()) - } - - localIP := os.Getenv(constants.PodIPEnvVar) - if localIP == "" { - return nil, fmt.Errorf("POD_IP environment variable not set") - } - - seedProvider := []map[string]interface{}{ - { - "class_name": "org.apache.cassandra.locator.SimpleSeedProvider", - "parameters": []map[string]interface{}{ - { - "seeds": seeds, - }, - }, - }, - } - - config["cluster_name"] = m.cluster - config["listen_address"] = localIP - config["broadcast_address"] = m.ip - config["rpc_address"] = "0.0.0.0" - config["broadcast_rpc_address"] = m.ip - config["endpoint_snitch"] = "GossipingPropertyFileSnitch" - config["seed_provider"] = seedProvider - - return yaml.Marshal(config) -} - -// getSeeds gets the IPs of the instances acting as Seeds -// in the Cluster. 
It does that by getting all ClusterIP services -// of the current Cluster with the cassandra.rook.io/seed label -func (m *MemberController) getSeeds() (string, error) { - ctx := context.TODO() - var services *corev1.ServiceList - var err error - - m.logger.Infof("Attempting to find seeds.") - sel := fmt.Sprintf("%s,%s=%s", constants.SeedLabel, constants.ClusterNameLabel, m.cluster) - - for { - services, err = m.kubeClient.CoreV1().Services(m.namespace).List(ctx, metav1.ListOptions{LabelSelector: sel}) - if err != nil { - return "", err - } - if len(services.Items) > 0 { - break - } - time.Sleep(1000 * time.Millisecond) - } - - seeds := []string{} - for _, svc := range services.Items { - seeds = append(seeds, svc.Spec.ClusterIP) - } - return strings.Join(seeds, ","), nil -} - -func getJolokiaConfig() string { - - opts := []struct { - flag, value string - }{ - { - flag: "host", - value: "localhost", - }, - { - flag: "port", - value: fmt.Sprintf("%d", constants.JolokiaPort), - }, - { - flag: "executor", - value: "fixed", - }, - { - flag: "threadNr", - value: "2", - }, - } - - cmd := []string{} - for _, opt := range opts { - cmd = append(cmd, fmt.Sprintf("%s=%s", opt.flag, opt.value)) - } - return fmt.Sprintf("-javaagent:%s=%s", jolokiaPath, strings.Join(cmd, ",")) -} - -func getJmxExporterConfig() string { - return fmt.Sprintf("-javaagent:%s=%s:%s", jmxExporterPath, jmxExporterPort, jmxExporterConfigPath) -} - -// Merge YAMLs merges two arbitrary YAML structures on the top level. -func mergeYAMLs(initialYAML, overrideYAML []byte) ([]byte, error) { - - var initial, override map[string]interface{} - if err := yaml.Unmarshal(initialYAML, &initial); err != nil { - return nil, fmt.Errorf("failed to unmarshal initial yaml. %v", err) - } - if err := yaml.Unmarshal(overrideYAML, &override); err != nil { - return nil, fmt.Errorf("failed to unmarshal override yaml. %v", err) - } - - if initial == nil { - initial = make(map[string]interface{}) - } - // Overwrite the values onto initial - for k, v := range override { - initial[k] = v - } - return yaml.Marshal(initial) - -} diff --git a/pkg/operator/cassandra/sidecar/config_test.go b/pkg/operator/cassandra/sidecar/config_test.go deleted file mode 100644 index dca8235e7a94..000000000000 --- a/pkg/operator/cassandra/sidecar/config_test.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
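Editor's note: mergeYAMLs above merges configuration only at the top level, so a key present in the override replaces the initial value wholesale, nested maps included. A dependency-free sketch of that semantics on plain maps, with illustrative values:

```go
package main

import "fmt"

// mergeTopLevel mirrors mergeYAMLs' behavior after unmarshalling: keys in
// override replace the corresponding top-level keys in initial wholesale.
func mergeTopLevel(initial, override map[string]interface{}) map[string]interface{} {
	out := map[string]interface{}{}
	for k, v := range initial {
		out[k] = v
	}
	for k, v := range override {
		out[k] = v
	}
	return out
}

func main() {
	initial := map[string]interface{}{
		"cluster_name": "default",
		"rpc_address":  "127.0.0.1",
	}
	override := map[string]interface{}{"rpc_address": "0.0.0.0"}

	fmt.Println(mergeTopLevel(initial, override))
	// map[cluster_name:default rpc_address:0.0.0.0]
}
```

This is consistent with the config_test.go table below, where an override of a nested structure yields the override's nested values rather than a deep merge of both.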
-*/ - -package sidecar - -import ( - "bytes" - "testing" -) - -func TestMergeYAMLs(t *testing.T) { - tests := []struct { - initial []byte - override []byte - result []byte - expectedErr bool - }{ - { - []byte("key: value"), - []byte("key: override_value"), - []byte("key: override_value\n"), - false, - }, - { - []byte("#comment"), - []byte("key: value"), - []byte("key: value\n"), - false, - }, - { - []byte("key: value"), - []byte("#comment"), - []byte("key: value\n"), - false, - }, - { - []byte("key1:\n nestedkey1: nestedvalue1"), - []byte("key1:\n nestedkey1: nestedvalue2"), - []byte("key1:\n nestedkey1: nestedvalue2\n"), - false, - }, - } - - for _, test := range tests { - result, err := mergeYAMLs(test.initial, test.override) - if !bytes.Equal(result, test.result) { - t.Errorf("Merge of '%s' and '%s' was incorrect,\n got: %s,\n want: %s.", - test.initial, test.override, result, test.result) - } - if err == nil && test.expectedErr { - t.Errorf("Expected error.") - } - if err != nil && !test.expectedErr { - t.Logf("Got an error as expected: %s", err.Error()) - } - } -} diff --git a/pkg/operator/cassandra/sidecar/sidecar.go b/pkg/operator/cassandra/sidecar/sidecar.go deleted file mode 100644 index 4452f45fd4bd..000000000000 --- a/pkg/operator/cassandra/sidecar/sidecar.go +++ /dev/null @@ -1,291 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package sidecar - -import ( - "context" - "fmt" - "net/url" - "os" - "os/exec" - "reflect" - "time" - - "github.com/coreos/pkg/capnslog" - "github.com/davecgh/go-spew/spew" - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - rookClientset "github.com/rook/rook/pkg/client/clientset/versioned" - "github.com/rook/rook/pkg/operator/cassandra/constants" - "github.com/yanniszark/go-nodetool/nodetool" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - coreinformers "k8s.io/client-go/informers/core/v1" - "k8s.io/client-go/kubernetes" - corelisters "k8s.io/client-go/listers/core/v1" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/workqueue" -) - -// MemberController encapsulates all the tools the sidecar needs to -// talk to the Kubernetes API -type MemberController struct { - // Metadata of the specific Member - name, namespace, ip string - cluster, datacenter, rack string - mode cassandrav1alpha1.ClusterMode - - // Clients and listers to handle Kubernetes Objects - kubeClient kubernetes.Interface - rookClient rookClientset.Interface - serviceLister corelisters.ServiceLister - serviceListerSynced cache.InformerSynced - - nodetool *nodetool.Nodetool - queue workqueue.RateLimitingInterface - logger *capnslog.PackageLogger -} - -// New return a new MemberController -func New( - name, namespace string, - kubeClient kubernetes.Interface, - rookClient rookClientset.Interface, - serviceInformer coreinformers.ServiceInformer, -) (*MemberController, error) { - ctx := context.TODO() - logger := capnslog.NewPackageLogger("github.com/rook/rook", "sidecar") - // Get the member's service - var memberService *corev1.Service - var err error - for { - memberService, err = kubeClient.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{}) - if err != nil { - logger.Infof("Something went wrong trying to get Member Service %s", name) - - } else if len(memberService.Spec.ClusterIP) > 0 { - break - } - // If something went wrong, wait a little and retry - time.Sleep(500 * time.Millisecond) - } - - // Get the Member's metadata from the Pod's labels - pod, err := kubeClient.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - - // Create a new nodetool interface to talk to Cassandra - url, err := url.Parse(fmt.Sprintf("http://127.0.0.1:%d/jolokia/", constants.JolokiaPort)) - if err != nil { - return nil, err - } - nodetool := nodetool.NewFromURL(url) - - // Get the member's cluster - cluster, err := rookClient.CassandraV1alpha1().Clusters(namespace).Get(ctx, pod.Labels[constants.ClusterNameLabel], metav1.GetOptions{}) - if err != nil { - return nil, err - } - - m := &MemberController{ - name: name, - namespace: namespace, - ip: memberService.Spec.ClusterIP, - cluster: pod.Labels[constants.ClusterNameLabel], - datacenter: pod.Labels[constants.DatacenterNameLabel], - rack: pod.Labels[constants.RackNameLabel], - mode: cluster.Spec.Mode, - kubeClient: kubeClient, - rookClient: rookClient, - serviceLister: serviceInformer.Lister(), - serviceListerSynced: serviceInformer.Informer().HasSynced, - nodetool: nodetool, - queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), - logger: logger, - } - - serviceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - svc, ok := obj.(*corev1.Service) - if 
!ok { - return - } - if svc.Name != m.name { - logger.Errorf("Lister returned unexpected service %s", svc.Name) - return - } - m.enqueueMemberService(svc) - }, - UpdateFunc: func(old, new interface{}) { - oldService, ok := old.(*corev1.Service) - if !ok { - return - } - newService, ok := new.(*corev1.Service) - if !ok { - return - } - if oldService.ResourceVersion == newService.ResourceVersion { - return - } - if reflect.DeepEqual(oldService.Labels, newService.Labels) { - return - } - logger.Infof("New event for my MemberService %s", newService.Name) - m.enqueueMemberService(newService) - }, - DeleteFunc: func(obj interface{}) { - svc, ok := obj.(*corev1.Service) - if !ok { - return - } - if svc.Name == m.name { - logger.Errorf("Unexpected deletion of MemberService %s", svc.Name) - } - }, - }) - - return m, nil -} - -// Run starts executing the sync loop for the sidecar -func (m *MemberController) Run(threadiness int, stopCh <-chan struct{}) error { - - defer runtime.HandleCrash() - - if ok := cache.WaitForCacheSync(stopCh, m.serviceListerSynced); !ok { - return fmt.Errorf("failed to wait for caches to sync") - } - - if err := m.onStartup(); err != nil { - return fmt.Errorf("error on startup: %s", err.Error()) - } - - m.logger.Infof("Main event loop") - go wait.Until(m.runWorker, time.Second, stopCh) - - <-stopCh - m.logger.Info("Shutting down sidecar.") - return nil - -} - -func (m *MemberController) runWorker() { - for m.processNextWorkItem() { - } -} - -func (m *MemberController) processNextWorkItem() bool { - obj, shutdown := m.queue.Get() - - if shutdown { - return false - } - - err := func(obj interface{}) error { - defer m.queue.Done(obj) - key, ok := obj.(string) - if !ok { - m.queue.Forget(obj) - runtime.HandleError(fmt.Errorf("expected string in queue but got %#v", obj)) - } - if err := m.syncHandler(key); err != nil { - m.queue.AddRateLimited(key) - return fmt.Errorf("error syncing '%s', requeueing: %s", key, err.Error()) - } - m.queue.Forget(obj) - m.logger.Infof("Successfully synced '%s'", key) - return nil - }(obj) - - if err != nil { - runtime.HandleError(err) - return true - } - - return true -} - -func (m *MemberController) syncHandler(key string) error { - // Convert the namespace/name string into a distinct namespace and name. - namespace, name, err := cache.SplitMetaNamespaceKey(key) - if err != nil { - runtime.HandleError(fmt.Errorf("invalid resource key: %s", key)) - return nil - } - - // Get the Cluster resource with this namespace/name - svc, err := m.serviceLister.Services(namespace).Get(name) - if err != nil { - // The Cluster resource may no longer exist, in which case we stop processing. - if apierrors.IsNotFound(err) { - runtime.HandleError(fmt.Errorf("member service '%s' in work queue no longer exists", key)) - return nil - } - return fmt.Errorf("unexpected error while getting member service object: %s", err) - } - - m.logger.Infof("handling member service object: %+v", spew.Sdump(svc)) - err = m.Sync(svc) - - return err -} - -// onStartup is executed before the MemberController starts -// its sync loop. 
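Editor's note: the sidecar's worker loop above follows the standard client-go workqueue pattern. The condensed sketch below shows the Get/Done/Forget/AddRateLimited cycle with a placeholder sync function; it uses the same workqueue package the deleted code imports and only mirrors the shape of processNextWorkItem, not its exact error handling.

```go
package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

// processNext mirrors the shape of processNextWorkItem above: take an item,
// always mark it Done, requeue on failure, Forget on success.
func processNext(q workqueue.RateLimitingInterface, sync func(key string) error) bool {
	obj, shutdown := q.Get()
	if shutdown {
		return false
	}
	defer q.Done(obj)

	key, ok := obj.(string)
	if !ok {
		q.Forget(obj) // drop malformed items instead of retrying them forever
		return true
	}
	if err := sync(key); err != nil {
		q.AddRateLimited(key) // retry later, subject to rate limiting
		return true
	}
	q.Forget(key)
	return true
}

func main() {
	q := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
	q.Add("test-ns/test-svc")
	processNext(q, func(key string) error {
		fmt.Println("synced", key)
		return nil
	})
}
```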
-func (m *MemberController) onStartup() error { - - // Setup HTTP checks - m.logger.Info("Setting up HTTP Checks...") - go func() { - err := m.setupHTTPChecks() - m.logger.Fatalf("Error with HTTP Server: %s", err.Error()) - panic("Something went wrong with the HTTP Checks") - }() - - // Prepare config files for Cassandra - m.logger.Infof("Generating cassandra config files...") - if err := m.generateConfigFiles(); err != nil { - return fmt.Errorf("error generating config files: %s", err.Error()) - } - - // Start the database daemon - cmd := exec.Command(entrypointPath) - cmd.Stderr = os.Stderr - cmd.Stdout = os.Stdout - cmd.Env = os.Environ() - if err := cmd.Start(); err != nil { - m.logger.Errorf("error starting database daemon: %s", err.Error()) - return err - } - - return nil -} - -func (m *MemberController) enqueueMemberService(obj metav1.Object) { - var key string - var err error - if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil { - runtime.HandleError(err) - return - } - m.queue.AddRateLimited(key) -} diff --git a/pkg/operator/cassandra/sidecar/sync.go b/pkg/operator/cassandra/sidecar/sync.go deleted file mode 100644 index 4e5b91bfeafa..000000000000 --- a/pkg/operator/cassandra/sidecar/sync.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package sidecar - -import ( - "fmt" - "github.com/rook/rook/pkg/operator/cassandra/constants" - "github.com/rook/rook/pkg/operator/cassandra/controller/util" - "github.com/yanniszark/go-nodetool/nodetool" - "k8s.io/api/core/v1" -) - -func (m *MemberController) Sync(memberService *v1.Service) error { - - // Check if member must decommission - if decommission, ok := memberService.Labels[constants.DecommissionLabel]; ok { - // Check if member has already decommissioned - if decommission == constants.LabelValueTrue { - return nil - } - // Else, decommission member - if err := m.nodetool.Decommission(); err != nil { - m.logger.Errorf("Error during decommission: %s", err.Error()) - } - // Confirm memberService has been decommissioned - if opMode, err := m.nodetool.OperationMode(); err != nil || opMode != nodetool.NodeOperationModeDecommissioned { - return fmt.Errorf("error during decommission, operation mode: %s, error: %v", opMode, err) - } - // Update Label - old := memberService.DeepCopy() - memberService.Labels[constants.DecommissionLabel] = constants.LabelValueTrue - if err := util.PatchService(old, memberService, m.kubeClient); err != nil { - return fmt.Errorf("error patching MemberService, %s", err.Error()) - } - - } - - return nil -} diff --git a/pkg/operator/cassandra/test/test.go b/pkg/operator/cassandra/test/test.go deleted file mode 100644 index a510fa9b2930..000000000000 --- a/pkg/operator/cassandra/test/test.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package test - -import ( - "fmt" - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - "github.com/rook/rook/pkg/operator/cassandra/controller/util" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -func NewSimpleCluster(members int32) *cassandrav1alpha1.Cluster { - return &cassandrav1alpha1.Cluster{ - TypeMeta: metav1.TypeMeta{ - APIVersion: cassandrav1alpha1.APIVersion, - Kind: "Cluster", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cluster", - Namespace: "test-ns", - }, - Spec: cassandrav1alpha1.ClusterSpec{ - Version: "3.1.11", - Mode: cassandrav1alpha1.ClusterModeCassandra, - Datacenter: cassandrav1alpha1.DatacenterSpec{ - Name: "test-dc", - Racks: []cassandrav1alpha1.RackSpec{ - { - Name: "test-rack", - Members: members, - }, - }, - }, - }, - } -} - -// MemberServicesForCluster returns the member services for a given cluster -func MemberServicesForCluster(c *cassandrav1alpha1.Cluster) []runtime.Object { - - services := []runtime.Object{} - for _, r := range c.Spec.Datacenter.Racks { - for i := int32(0); i < c.Status.Racks[r.Name].Members; i++ { - svc := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-%s-%s-%d", c.Name, c.Spec.Datacenter.Name, r.Name, i), - Namespace: c.Namespace, - Labels: util.RackLabels(r, c), - }, - } - services = append(services, svc) - } - } - return services -} diff --git a/pkg/operator/ceph/client/controller.go b/pkg/operator/ceph/client/controller.go index 948a008f7d55..ca1699110eee 100644 --- a/pkg/operator/ceph/client/controller.go +++ b/pkg/operator/ceph/client/controller.go @@ -156,7 +156,7 @@ func (r *ReconcileCephClient) reconcile(request reconcile.Request) (reconcile.Re } // Make sure a CephCluster is present otherwise do nothing - _, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName) + _, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, request.NamespacedName, controllerName) if !isReadyToReconcile { // This handles the case where the Ceph Cluster is gone and we want to delete that CR // We skip the deletePool() function since everything is gone already diff --git a/pkg/operator/ceph/cluster/cephstatus.go b/pkg/operator/ceph/cluster/cephstatus.go index 7428c74ca918..acec9a1e0bf7 100644 --- a/pkg/operator/ceph/cluster/cephstatus.go +++ b/pkg/operator/ceph/cluster/cephstatus.go @@ -28,6 +28,7 @@ import ( cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" "github.com/rook/rook/pkg/clusterd" cephclient "github.com/rook/rook/pkg/daemon/ceph/client" + "github.com/rook/rook/pkg/operator/ceph/config" opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" "github.com/rook/rook/pkg/operator/ceph/reporting" cephver "github.com/rook/rook/pkg/operator/ceph/version" @@ -158,7 +159,8 @@ func (c *cephStatusChecker) configureHealthSettings(status cephclient.CephStatus if _, ok := status.Health.Checks["AUTH_INSECURE_GLOBAL_ID_RECLAIM_ALLOWED"]; ok { if _, ok := 
status.Health.Checks["AUTH_INSECURE_GLOBAL_ID_RECLAIM"]; !ok { logger.Info("Disabling the insecure global ID as no legacy clients are currently connected. If you still require the insecure connections, see the CVE to suppress the health warning and re-enable the insecure connections. https://docs.ceph.com/en/latest/security/CVE-2021-20288/") - if _, err := cephclient.SetConfig(c.context, c.clusterInfo, "mon", "auth_allow_insecure_global_id_reclaim", "false", false); err != nil { + monStore := config.GetMonStore(c.context, c.clusterInfo) + if err := monStore.Set("mon", "auth_allow_insecure_global_id_reclaim", "false"); err != nil { logger.Warningf("failed to disable the insecure global ID. %v", err) } else { logger.Info("insecure global ID is now disabled") diff --git a/pkg/operator/ceph/cluster/cephstatus_test.go b/pkg/operator/ceph/cluster/cephstatus_test.go index 54ccd2b7514d..236adf7826b8 100644 --- a/pkg/operator/ceph/cluster/cephstatus_test.go +++ b/pkg/operator/ceph/cluster/cephstatus_test.go @@ -161,22 +161,17 @@ func TestConfigureHealthSettings(t *testing.T) { context: &clusterd.Context{}, clusterInfo: cephclient.AdminClusterInfo("ns"), } - getGlobalIDReclaim := false setGlobalIDReclaim := false c.context.Executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { + MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, args ...string) (string, error) { logger.Infof("Command: %s %v", command, args) if args[0] == "config" && args[3] == "auth_allow_insecure_global_id_reclaim" { - if args[1] == "get" { - getGlobalIDReclaim = true - return "", nil - } if args[1] == "set" { setGlobalIDReclaim = true return "", nil } } - return "", errors.New("mock error to simulate failure of SetConfig() function") + return "", errors.New("mock error to simulate failure of mon store config") }, } noActionOneWarningStatus := cephclient.CephStatus{ @@ -224,24 +219,21 @@ func TestConfigureHealthSettings(t *testing.T) { type args struct { status cephclient.CephStatus - expectedGetGlobalIDSetting bool expectedSetGlobalIDSetting bool } tests := []struct { name string args args }{ - {"no-warnings", args{cephclient.CephStatus{}, false, false}}, - {"no-action-one-warning", args{noActionOneWarningStatus, false, false}}, - {"disable-insecure-global-id", args{disableInsecureGlobalIDStatus, true, true}}, - {"no-disable-insecure-global-id", args{noDisableInsecureGlobalIDStatus, false, false}}, + {"no-warnings", args{cephclient.CephStatus{}, false}}, + {"no-action-one-warning", args{noActionOneWarningStatus, false}}, + {"disable-insecure-global-id", args{disableInsecureGlobalIDStatus, true}}, + {"no-disable-insecure-global-id", args{noDisableInsecureGlobalIDStatus, false}}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - getGlobalIDReclaim = false setGlobalIDReclaim = false c.configureHealthSettings(tt.args.status) - assert.Equal(t, tt.args.expectedGetGlobalIDSetting, getGlobalIDReclaim) assert.Equal(t, tt.args.expectedSetGlobalIDSetting, setGlobalIDReclaim) }) } diff --git a/pkg/operator/ceph/cluster/cleanup.go b/pkg/operator/ceph/cluster/cleanup.go index 41ed0ab5825c..c0ed852e2ca2 100644 --- a/pkg/operator/ceph/cluster/cleanup.go +++ b/pkg/operator/ceph/cluster/cleanup.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "strconv" + "strings" "time" "github.com/pkg/errors" @@ -30,12 +31,13 @@ import ( "github.com/rook/rook/pkg/operator/ceph/cluster/rbd" "github.com/rook/rook/pkg/operator/ceph/controller" 
"github.com/rook/rook/pkg/operator/ceph/file/mds" + "github.com/rook/rook/pkg/operator/ceph/file/mirror" "github.com/rook/rook/pkg/operator/ceph/object" "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/util" batch "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" ) const ( @@ -207,10 +209,10 @@ func (c *ClusterController) waitForCephDaemonCleanUp(stopCleanupCh chan struct{} // getCephHosts returns a list of host names where ceph daemon pods are running func (c *ClusterController) getCephHosts(namespace string) ([]string, error) { ctx := context.TODO() - cephPodCount := map[string]int{} - cephAppNames := []string{mon.AppName, mgr.AppName, osd.AppName, object.AppName, mds.AppName, rbd.AppName} - nodeNameList := util.NewSet() + cephAppNames := []string{mon.AppName, mgr.AppName, osd.AppName, object.AppName, mds.AppName, rbd.AppName, mirror.AppName} + nodeNameList := sets.NewString() hostNameList := []string{} + var b strings.Builder // get all the node names where ceph daemons are running for _, app := range cephAppNames { @@ -221,16 +223,16 @@ func (c *ClusterController) getCephHosts(namespace string) ([]string, error) { } for _, cephPod := range podList.Items { podNodeName := cephPod.Spec.NodeName - if podNodeName != "" && !nodeNameList.Contains(podNodeName) { - nodeNameList.Add(podNodeName) + if podNodeName != "" && !nodeNameList.Has(podNodeName) { + nodeNameList.Insert(podNodeName) } } - cephPodCount[app] = len(podList.Items) + fmt.Fprintf(&b, "%s: %d. ", app, len(podList.Items)) } - logger.Infof("existing ceph daemons in the namespace %q: rook-ceph-mon: %d, rook-ceph-osd: %d, rook-ceph-mds: %d, rook-ceph-rgw: %d, rook-ceph-mgr: %d, rook-ceph-rbd-mirror: %d", - namespace, cephPodCount["rook-ceph-mon"], cephPodCount["rook-ceph-osd"], cephPodCount["rook-ceph-mds"], cephPodCount["rook-ceph-rgw"], cephPodCount["rook-ceph-mgr"], cephPodCount["rook-ceph-rbd-mirror"]) - for nodeName := range nodeNameList.Iter() { + logger.Infof("existing ceph daemons in the namespace %q. 
%s", namespace, b.String()) + + for nodeName := range nodeNameList { podHostName, err := k8sutil.GetNodeHostName(c.context.Clientset, nodeName) if err != nil { return nil, errors.Wrapf(err, "failed to get hostname from node %q", nodeName) diff --git a/pkg/operator/ceph/cluster/cluster.go b/pkg/operator/ceph/cluster/cluster.go index d5d6e11a0e68..e2cc23f38adb 100755 --- a/pkg/operator/ceph/cluster/cluster.go +++ b/pkg/operator/ceph/cluster/cluster.go @@ -88,13 +88,14 @@ func newCluster(c *cephv1.CephCluster, context *clusterd.Context, csiMutex *sync } } -func (c *cluster) doOrchestration(rookImage string, cephVersion cephver.CephVersion, spec *cephv1.ClusterSpec) error { +func (c *cluster) reconcileCephDaemons(rookImage string, cephVersion cephver.CephVersion) error { // Create a configmap for overriding ceph config settings // These settings should only be modified by a user after they are initialized err := populateConfigOverrideConfigMap(c.context, c.Namespace, c.ownerInfo) if err != nil { return errors.Wrap(err, "failed to populate config override config map") } + c.ClusterInfo.SetName(c.namespacedName.Name) // Start the mon pods controller.UpdateCondition(c.context, c.namespacedName, cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, "Configuring Ceph Mons") @@ -105,7 +106,7 @@ func (c *cluster) doOrchestration(rookImage string, cephVersion cephver.CephVers clusterInfo.OwnerInfo = c.ownerInfo clusterInfo.SetName(c.namespacedName.Name) c.ClusterInfo = clusterInfo - c.ClusterInfo.NetworkSpec = spec.Network + c.ClusterInfo.NetworkSpec = c.Spec.Network // The cluster Identity must be established at this point if !c.ClusterInfo.IsInitialized(true) { @@ -135,7 +136,7 @@ func (c *cluster) doOrchestration(rookImage string, cephVersion cephver.CephVers // Start Ceph manager controller.UpdateCondition(c.context, c.namespacedName, cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, "Configuring Ceph Mgr(s)") - mgrs := mgr.New(c.context, c.ClusterInfo, *spec, rookImage) + mgrs := mgr.New(c.context, c.ClusterInfo, *c.Spec, rookImage) err = mgrs.Start() if err != nil { return errors.Wrap(err, "failed to start ceph mgr") @@ -143,7 +144,7 @@ func (c *cluster) doOrchestration(rookImage string, cephVersion cephver.CephVers // Start the OSDs controller.UpdateCondition(c.context, c.namespacedName, cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, "Configuring Ceph OSDs") - osds := osd.New(c.context, c.ClusterInfo, *spec, rookImage) + osds := osd.New(c.context, c.ClusterInfo, *c.Spec, rookImage) err = osds.Start() if err != nil { return errors.Wrap(err, "failed to start ceph osds") @@ -151,7 +152,8 @@ func (c *cluster) doOrchestration(rookImage string, cephVersion cephver.CephVers // If a stretch cluster, enable the arbiter after the OSDs are created with the CRUSH map if c.Spec.IsStretchCluster() { - if err := c.mons.ConfigureArbiter(); err != nil { + failingOver := false + if err := c.mons.ConfigureArbiter(failingOver); err != nil { return errors.Wrap(err, "failed to configure stretch arbiter") } } @@ -169,10 +171,7 @@ func (c *cluster) doOrchestration(rookImage string, cephVersion cephver.CephVers return nil } -func (c *ClusterController) initializeCluster(cluster *cluster, clusterObj *cephv1.CephCluster) error { - ctx := context.TODO() - cluster.Spec = &clusterObj.Spec - +func (c *ClusterController) initializeCluster(cluster *cluster) error { // Check if the dataDirHostPath is located in the disallowed paths list 
cleanDataDirHostPath := path.Clean(cluster.Spec.DataDirHostPath) for _, b := range disallowedHostDirectories { @@ -203,7 +202,7 @@ func (c *ClusterController) initializeCluster(cluster *cluster, clusterObj *ceph // Test if the cluster has already been configured if the mgr deployment has been created. // If the mgr does not exist, the mons have never been verified to be in quorum. opts := metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", k8sutil.AppAttr, mgr.AppName)} - mgrDeployments, err := c.context.Clientset.AppsV1().Deployments(cluster.Namespace).List(ctx, opts) + mgrDeployments, err := c.context.Clientset.AppsV1().Deployments(cluster.Namespace).List(context.TODO(), opts) if err == nil && len(mgrDeployments.Items) > 0 && cluster.ClusterInfo != nil { c.configureCephMonitoring(cluster, clusterInfo) } @@ -217,6 +216,7 @@ func (c *ClusterController) initializeCluster(cluster *cluster, clusterObj *ceph // Populate ClusterInfo with the last value cluster.mons.ClusterInfo = cluster.ClusterInfo + cluster.mons.ClusterInfo.SetName(c.namespacedName.Name) // Start the monitoring if not already started c.configureCephMonitoring(cluster, cluster.ClusterInfo) @@ -225,34 +225,31 @@ func (c *ClusterController) initializeCluster(cluster *cluster, clusterObj *ceph func (c *ClusterController) configureLocalCephCluster(cluster *cluster) error { // Cluster Spec validation - err := c.preClusterStartValidation(cluster) + err := preClusterStartValidation(cluster) if err != nil { return errors.Wrap(err, "failed to perform validation before cluster creation") } - // Pass down the client to interact with Kubernetes objects - // This will be used later down by spec code to create objects like deployment, services etc - cluster.context.Client = c.client - // Run image validation job controller.UpdateCondition(c.context, c.namespacedName, cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, "Detecting Ceph version") cephVersion, isUpgrade, err := c.detectAndValidateCephVersion(cluster) if err != nil { return errors.Wrap(err, "failed the ceph version check") } + // Set the value of isUpgrade based on the image discovery done by detectAndValidateCephVersion() + cluster.isUpgrade = isUpgrade if cluster.Spec.IsStretchCluster() { - if !cephVersion.IsAtLeast(cephver.CephVersion{Major: 16, Minor: 2, Build: 5}) { - return errors.Errorf("stretch clusters minimum ceph version is v16.2.5, but is running %s", cephVersion.String()) + stretchVersion := cephver.CephVersion{Major: 16, Minor: 2, Build: 5} + if !cephVersion.IsAtLeast(stretchVersion) { + return errors.Errorf("stretch clusters minimum ceph version is %q, but is running %s", stretchVersion.String(), cephVersion.String()) } } - // Set the value of isUpgrade based on the image discovery done by detectAndValidateCephVersion() - cluster.isUpgrade = isUpgrade controller.UpdateCondition(c.context, c.namespacedName, cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, "Configuring the Ceph cluster") // Run the orchestration - err = cluster.doOrchestration(c.rookImage, *cephVersion, cluster.Spec) + err = cluster.reconcileCephDaemons(c.rookImage, *cephVersion) if err != nil { return errors.Wrap(err, "failed to create cluster") } @@ -351,18 +348,15 @@ func (c *cluster) notifyChildControllerOfUpgrade() error { } // Validate the cluster Specs -func (c *ClusterController) preClusterStartValidation(cluster *cluster) error { +func preClusterStartValidation(cluster *cluster) error { ctx := context.TODO() if 
cluster.Spec.Mon.Count == 0 { logger.Warningf("mon count should be at least 1, will use default value of %d", mon.DefaultMonCount) cluster.Spec.Mon.Count = mon.DefaultMonCount } - if cluster.Spec.Mon.Count%2 == 0 { - return errors.Errorf("mon count %d cannot be even, must be odd to support a healthy quorum", cluster.Spec.Mon.Count) - } if !cluster.Spec.Mon.AllowMultiplePerNode { // Check that there are enough nodes to have a chance of starting the requested number of mons - nodes, err := c.context.Clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) + nodes, err := cluster.context.Clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) if err == nil && len(nodes.Items) < cluster.Spec.Mon.Count { return errors.Errorf("cannot start %d mons on %d node(s) when allowMultiplePerNode is false", cluster.Spec.Mon.Count, len(nodes.Items)) } @@ -390,7 +384,7 @@ func (c *ClusterController) preClusterStartValidation(cluster *cluster) error { } // Get network attachment definition - _, err := c.context.NetworkClient.NetworkAttachmentDefinitions(multusNamespace).Get(ctx, nad, metav1.GetOptions{}) + _, err := cluster.context.NetworkClient.NetworkAttachmentDefinitions(multusNamespace).Get(ctx, nad, metav1.GetOptions{}) if err != nil { if kerrors.IsNotFound(err) { return errors.Wrapf(err, "specified network attachment definition for selector %q does not exist", selector) @@ -403,7 +397,7 @@ func (c *ClusterController) preClusterStartValidation(cluster *cluster) error { // Validate on-PVC cluster encryption KMS settings if cluster.Spec.Storage.IsOnPVCEncrypted() && cluster.Spec.Security.KeyManagementService.IsEnabled() { // Validate the KMS details - err := kms.ValidateConnectionDetails(c.context, cluster.Spec.Security, cluster.Namespace) + err := kms.ValidateConnectionDetails(cluster.context, &cluster.Spec.Security, cluster.Namespace) if err != nil { return errors.Wrap(err, "failed to validate kms connection details") } @@ -579,5 +573,11 @@ func (c *cluster) postMonStartupActions() error { } } + // Create cluster-wide RBD bootstrap peer token + _, err = controller.CreateBootstrapPeerSecret(c.context, c.ClusterInfo, &cephv1.CephCluster{ObjectMeta: metav1.ObjectMeta{Name: c.namespacedName.Name, Namespace: c.Namespace}}, c.ownerInfo) + if err != nil { + return errors.Wrap(err, "failed to create cluster rbd bootstrap peer token") + } + return nil } diff --git a/pkg/operator/ceph/cluster/cluster_test.go b/pkg/operator/ceph/cluster/cluster_test.go index adebd6405ba8..d121c79f6c2a 100644 --- a/pkg/operator/ceph/cluster/cluster_test.go +++ b/pkg/operator/ceph/cluster/cluster_test.go @@ -34,27 +34,27 @@ func TestPreClusterStartValidation(t *testing.T) { args args wantErr bool }{ - {"no settings", args{&cluster{Spec: &cephv1.ClusterSpec{}}}, false}, - {"even mons", args{&cluster{Spec: &cephv1.ClusterSpec{Mon: cephv1.MonSpec{Count: 2}}}}, true}, - {"missing stretch zones", args{&cluster{Spec: &cephv1.ClusterSpec{Mon: cephv1.MonSpec{StretchCluster: &cephv1.StretchClusterSpec{Zones: []cephv1.StretchClusterZoneSpec{ + {"no settings", args{&cluster{Spec: &cephv1.ClusterSpec{}, context: &clusterd.Context{Clientset: testop.New(t, 3)}}}, false}, + {"even mons", args{&cluster{context: &clusterd.Context{Clientset: testop.New(t, 3)}, Spec: &cephv1.ClusterSpec{Mon: cephv1.MonSpec{Count: 2}}}}, false}, + {"missing stretch zones", args{&cluster{context: &clusterd.Context{Clientset: testop.New(t, 3)}, Spec: &cephv1.ClusterSpec{Mon: cephv1.MonSpec{StretchCluster: &cephv1.StretchClusterSpec{Zones: 
[]cephv1.StretchClusterZoneSpec{ {Name: "a"}, }}}}}}, true}, - {"missing arbiter", args{&cluster{Spec: &cephv1.ClusterSpec{Mon: cephv1.MonSpec{StretchCluster: &cephv1.StretchClusterSpec{Zones: []cephv1.StretchClusterZoneSpec{ + {"missing arbiter", args{&cluster{context: &clusterd.Context{Clientset: testop.New(t, 3)}, Spec: &cephv1.ClusterSpec{Mon: cephv1.MonSpec{StretchCluster: &cephv1.StretchClusterSpec{Zones: []cephv1.StretchClusterZoneSpec{ {Name: "a"}, {Name: "b"}, {Name: "c"}, }}}}}}, true}, - {"missing zone name", args{&cluster{Spec: &cephv1.ClusterSpec{Mon: cephv1.MonSpec{StretchCluster: &cephv1.StretchClusterSpec{Zones: []cephv1.StretchClusterZoneSpec{ + {"missing zone name", args{&cluster{context: &clusterd.Context{Clientset: testop.New(t, 3)}, Spec: &cephv1.ClusterSpec{Mon: cephv1.MonSpec{StretchCluster: &cephv1.StretchClusterSpec{Zones: []cephv1.StretchClusterZoneSpec{ {Arbiter: true}, {Name: "b"}, {Name: "c"}, }}}}}}, true}, - {"valid stretch cluster", args{&cluster{Spec: &cephv1.ClusterSpec{Mon: cephv1.MonSpec{Count: 3, StretchCluster: &cephv1.StretchClusterSpec{Zones: []cephv1.StretchClusterZoneSpec{ + {"valid stretch cluster", args{&cluster{context: &clusterd.Context{Clientset: testop.New(t, 3)}, Spec: &cephv1.ClusterSpec{Mon: cephv1.MonSpec{Count: 3, StretchCluster: &cephv1.StretchClusterSpec{Zones: []cephv1.StretchClusterZoneSpec{ {Name: "a", Arbiter: true}, {Name: "b"}, {Name: "c"}, }}}}}}, false}, - {"not enough stretch nodes", args{&cluster{Spec: &cephv1.ClusterSpec{Mon: cephv1.MonSpec{Count: 5, StretchCluster: &cephv1.StretchClusterSpec{Zones: []cephv1.StretchClusterZoneSpec{ + {"not enough stretch nodes", args{&cluster{context: &clusterd.Context{Clientset: testop.New(t, 3)}, Spec: &cephv1.ClusterSpec{Mon: cephv1.MonSpec{Count: 5, StretchCluster: &cephv1.StretchClusterSpec{Zones: []cephv1.StretchClusterZoneSpec{ {Name: "a", Arbiter: true}, {Name: "b"}, {Name: "c"}, @@ -62,12 +62,7 @@ func TestPreClusterStartValidation(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - c := &ClusterController{ - context: &clusterd.Context{ - Clientset: testop.New(t, 3), - }, - } - if err := c.preClusterStartValidation(tt.args.cluster); (err != nil) != tt.wantErr { + if err := preClusterStartValidation(tt.args.cluster); (err != nil) != tt.wantErr { t.Errorf("ClusterController.preClusterStartValidation() error = %v, wantErr %v", err, tt.wantErr) } }) diff --git a/pkg/operator/ceph/cluster/controller.go b/pkg/operator/ceph/cluster/controller.go index 7769f4d88fdb..bf41cc1f84da 100644 --- a/pkg/operator/ceph/cluster/controller.go +++ b/pkg/operator/ceph/cluster/controller.go @@ -269,7 +269,7 @@ func (r *ReconcileCephCluster) reconcile(request reconcile.Request) (reconcile.R // Do reconcile here! 
ownerInfo := k8sutil.NewOwnerInfo(cephCluster, r.scheme) - if err := r.clusterController.onAdd(cephCluster, ownerInfo); err != nil { + if err := r.clusterController.reconcileCephCluster(cephCluster, ownerInfo); err != nil { return reconcile.Result{}, cephCluster, errors.Wrapf(err, "failed to reconcile cluster %q", cephCluster.Name) } @@ -356,7 +356,7 @@ func NewClusterController(context *clusterd.Context, rookImage string, volumeAtt } } -func (c *ClusterController) onAdd(clusterObj *cephv1.CephCluster, ownerInfo *k8sutil.OwnerInfo) error { +func (c *ClusterController) reconcileCephCluster(clusterObj *cephv1.CephCluster, ownerInfo *k8sutil.OwnerInfo) error { if clusterObj.Spec.CleanupPolicy.HasDataDirCleanPolicy() { logger.Infof("skipping orchestration for cluster object %q in namespace %q because its cleanup policy is set", clusterObj.Name, clusterObj.Namespace) return nil @@ -367,6 +367,14 @@ func (c *ClusterController) onAdd(clusterObj *cephv1.CephCluster, ownerInfo *k8s // It's a new cluster so let's populate the struct cluster = newCluster(clusterObj, c.context, c.csiConfigMutex, ownerInfo) } + cluster.namespacedName = c.namespacedName + + // Pass down the client to interact with Kubernetes objects + // This will be used later down by spec code to create objects like deployment, services etc + cluster.context.Client = c.client + + // Set the spec + cluster.Spec = &clusterObj.Spec // Note that this lock is held through the callback process, as this creates CSI resources, but we must lock in // this scope as the clusterMap is authoritative on cluster count and thus involved in the check for CSI resource @@ -383,7 +391,7 @@ func (c *ClusterController) onAdd(clusterObj *cephv1.CephCluster, ownerInfo *k8s c.csiConfigMutex.Unlock() // Start the main ceph cluster orchestration - return c.initializeCluster(cluster, clusterObj) + return c.initializeCluster(cluster) } func (c *ClusterController) requestClusterDelete(cluster *cephv1.CephCluster) (reconcile.Result, error) { diff --git a/pkg/operator/ceph/cluster/crash/crash.go b/pkg/operator/ceph/cluster/crash/crash.go index 8ff255ac4a85..be7d926dc7d3 100644 --- a/pkg/operator/ceph/cluster/crash/crash.go +++ b/pkg/operator/ceph/cluster/crash/crash.go @@ -96,6 +96,7 @@ func (r *ReconcileNode) createOrUpdateCephCrash(node corev1.Node, tolerations [] } deploy.ObjectMeta.Labels = deploymentLabels + cephv1.GetCrashCollectorLabels(cephCluster.Spec.Labels).ApplyToObjectMeta(&deploy.ObjectMeta) k8sutil.AddRookVersionLabelToDeployment(deploy) if cephVersion != nil { controller.AddCephVersionLabelToDeployment(*cephVersion, deploy) diff --git a/pkg/operator/ceph/cluster/mgr/dashboard.go b/pkg/operator/ceph/cluster/mgr/dashboard.go index a94f31e95fb8..42047133b070 100644 --- a/pkg/operator/ceph/cluster/mgr/dashboard.go +++ b/pkg/operator/ceph/cluster/mgr/dashboard.go @@ -21,7 +21,6 @@ import ( "context" "crypto/rand" "fmt" - "io/ioutil" "os" "strconv" "syscall" @@ -29,8 +28,10 @@ import ( "github.com/pkg/errors" "github.com/rook/rook/pkg/daemon/ceph/client" + "github.com/rook/rook/pkg/operator/ceph/config" cephver "github.com/rook/rook/pkg/operator/ceph/version" "github.com/rook/rook/pkg/operator/k8sutil" + "github.com/rook/rook/pkg/util" "github.com/rook/rook/pkg/util/exec" v1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" @@ -111,17 +112,19 @@ func (c *Cluster) configureDashboardModules() error { } func (c *Cluster) configureDashboardModuleSettings(daemonID string) (bool, error) { + monStore := config.GetMonStore(c.context, c.clusterInfo) 
+ daemonID = fmt.Sprintf("mgr.%s", daemonID) // url prefix - hasChanged, err := client.SetConfig(c.context, c.clusterInfo, daemonID, "mgr/dashboard/url_prefix", c.spec.Dashboard.URLPrefix, false) + hasChanged, err := monStore.SetIfChanged(daemonID, "mgr/dashboard/url_prefix", c.spec.Dashboard.URLPrefix) if err != nil { return false, err } // ssl support ssl := strconv.FormatBool(c.spec.Dashboard.SSL) - changed, err := client.SetConfig(c.context, c.clusterInfo, daemonID, "mgr/dashboard/ssl", ssl, false) + changed, err := monStore.SetIfChanged(daemonID, "mgr/dashboard/ssl", ssl) if err != nil { return false, err } @@ -129,7 +132,7 @@ func (c *Cluster) configureDashboardModuleSettings(daemonID string) (bool, error // server port port := strconv.Itoa(c.dashboardPort()) - changed, err = client.SetConfig(c.context, c.clusterInfo, daemonID, "mgr/dashboard/server_port", port, false) + changed, err = monStore.SetIfChanged(daemonID, "mgr/dashboard/server_port", port) if err != nil { return false, err } @@ -137,7 +140,7 @@ func (c *Cluster) configureDashboardModuleSettings(daemonID string) (bool, error // SSL enabled. Needed to set specifically the ssl port setting if c.spec.Dashboard.SSL { - changed, err = client.SetConfig(c.context, c.clusterInfo, daemonID, "mgr/dashboard/ssl_server_port", port, false) + changed, err = monStore.SetIfChanged(daemonID, "mgr/dashboard/ssl_server_port", port) if err != nil { return false, err } @@ -174,12 +177,12 @@ func (c *Cluster) initializeSecureDashboard() (bool, error) { } func (c *Cluster) createSelfSignedCert() (bool, error) { - // create a self-signed cert for the https connections required in mimic + // create a self-signed cert for the https connections args := []string{"dashboard", "create-self-signed-cert"} // retry a few times in the case that the mgr module is not ready to accept commands for i := 0; i < 5; i++ { - _, err := client.NewCephCommand(c.context, c.clusterInfo, args).RunWithTimeout(exec.CephCommandTimeout) + _, err := client.NewCephCommand(c.context, c.clusterInfo, args).RunWithTimeout(exec.CephCommandsTimeout) if err == context.DeadlineExceeded { logger.Warning("cert creation timed out. trying again") continue @@ -214,21 +217,6 @@ func FileBasedPasswordSupported(c *client.ClusterInfo) bool { return false } -func CreateTempPasswordFile(password string) (*os.File, error) { - // Generate a temp file - file, err := ioutil.TempFile("", "") - if err != nil { - return nil, errors.Wrap(err, "failed to generate temp file") - } - - // Write password into file - err = ioutil.WriteFile(file.Name(), []byte(password), 0440) - if err != nil { - return nil, errors.Wrap(err, "failed to write dashboard password into file") - } - return file, nil -} - func (c *Cluster) setLoginCredentials(password string) error { // Set the login credentials. Write the command/args to the debug log so we don't write the password by default to the log. 
logger.Infof("setting ceph dashboard %q login creds", dashboardUsername) @@ -237,7 +225,7 @@ func (c *Cluster) setLoginCredentials(password string) error { // for latest Ceph versions if FileBasedPasswordSupported(c.clusterInfo) { // Generate a temp file - file, err := CreateTempPasswordFile(password) + file, err := util.CreateTempFile(password) if err != nil { return errors.Wrap(err, "failed to create a temporary dashboard password file") } @@ -253,7 +241,7 @@ func (c *Cluster) setLoginCredentials(password string) error { } _, err := client.ExecuteCephCommandWithRetry(func() (string, []byte, error) { - output, err := client.NewCephCommand(c.context, c.clusterInfo, args).RunWithTimeout(exec.CephCommandTimeout) + output, err := client.NewCephCommand(c.context, c.clusterInfo, args).RunWithTimeout(exec.CephCommandsTimeout) return "set dashboard creds", output, err }, c.exitCode, 5, invalidArgErrorCode, dashboardInitWaitTime) if err != nil { diff --git a/pkg/operator/ceph/cluster/mgr/drain.go b/pkg/operator/ceph/cluster/mgr/drain.go new file mode 100644 index 000000000000..f4892ce771c2 --- /dev/null +++ b/pkg/operator/ceph/cluster/mgr/drain.go @@ -0,0 +1,120 @@ +/* +Copyright 2021 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mgr + +import ( + "context" + + "github.com/pkg/errors" + "github.com/rook/rook/pkg/operator/k8sutil" + policyv1 "k8s.io/api/policy/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +const ( + mgrPDBName = "rook-ceph-mgr-pdb" +) + +func (c *Cluster) reconcileMgrPDB() error { + var maxUnavailable int32 = 1 + usePDBV1Beta1, err := k8sutil.UsePDBV1Beta1Version(c.context.Clientset) + if err != nil { + return errors.Wrap(err, "failed to fetch pdb version") + } + objectMeta := metav1.ObjectMeta{ + Name: mgrPDBName, + Namespace: c.clusterInfo.Namespace, + } + selector := &metav1.LabelSelector{ + MatchLabels: map[string]string{k8sutil.AppAttr: AppName}, + } + if usePDBV1Beta1 { + pdb := &policyv1beta1.PodDisruptionBudget{ + ObjectMeta: objectMeta, + } + mutateFunc := func() error { + pdb.Spec = policyv1beta1.PodDisruptionBudgetSpec{ + Selector: selector, + MaxUnavailable: &intstr.IntOrString{IntVal: maxUnavailable}, + } + return nil + } + op, err := controllerutil.CreateOrUpdate(context.TODO(), c.context.Client, pdb, mutateFunc) + if err != nil { + return errors.Wrapf(err, "failed to reconcile mgr pdb on op %q", op) + } + return nil + } + pdb := &policyv1.PodDisruptionBudget{ + ObjectMeta: objectMeta, + } + mutateFunc := func() error { + pdb.Spec = policyv1.PodDisruptionBudgetSpec{ + Selector: selector, + MaxUnavailable: &intstr.IntOrString{IntVal: maxUnavailable}, + } + return nil + } + op, err := controllerutil.CreateOrUpdate(context.TODO(), c.context.Client, pdb, mutateFunc) + if err != nil { + return 
errors.Wrapf(err, "failed to reconcile mgr pdb on op %q", op) + } + return nil +} + +func (c *Cluster) deleteMgrPDB() { + pdbRequest := types.NamespacedName{Name: mgrPDBName, Namespace: c.clusterInfo.Namespace} + usePDBV1Beta1, err := k8sutil.UsePDBV1Beta1Version(c.context.Clientset) + if err != nil { + logger.Errorf("failed to fetch pdb version. %v", err) + return + } + if usePDBV1Beta1 { + mgrPDB := &policyv1beta1.PodDisruptionBudget{} + err := c.context.Client.Get(context.TODO(), pdbRequest, mgrPDB) + if err != nil { + if !kerrors.IsNotFound(err) { + logger.Errorf("failed to get mgr pdb %q. %v", mgrPDBName, err) + } + return + } + logger.Debugf("ensuring the mgr pdb %q is deleted", mgrPDBName) + err = c.context.Client.Delete(context.TODO(), mgrPDB) + if err != nil { + logger.Errorf("failed to delete mgr pdb %q. %v", mgrPDBName, err) + return + } + } + mgrPDB := &policyv1.PodDisruptionBudget{} + err = c.context.Client.Get(context.TODO(), pdbRequest, mgrPDB) + if err != nil { + if !kerrors.IsNotFound(err) { + logger.Errorf("failed to get mgr pdb %q. %v", mgrPDBName, err) + } + return + } + logger.Debugf("ensuring the mgr pdb %q is deleted", mgrPDBName) + err = c.context.Client.Delete(context.TODO(), mgrPDB) + if err != nil { + logger.Errorf("failed to delete mgr pdb %q. %v", mgrPDBName, err) + } +} diff --git a/pkg/operator/ceph/cluster/mgr/drain_test.go b/pkg/operator/ceph/cluster/mgr/drain_test.go new file mode 100644 index 000000000000..1686fb7cf661 --- /dev/null +++ b/pkg/operator/ceph/cluster/mgr/drain_test.go @@ -0,0 +1,148 @@ +/* +Copyright 2021 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mgr + +import ( + "context" + "testing" + + cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + "github.com/rook/rook/pkg/client/clientset/versioned/scheme" + "github.com/rook/rook/pkg/clusterd" + cephclient "github.com/rook/rook/pkg/daemon/ceph/client" + "github.com/rook/rook/pkg/operator/test" + "github.com/stretchr/testify/assert" + policyv1 "k8s.io/api/policy/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +const ( + mockNamespace = "test-ns" +) + +func createFakeCluster(t *testing.T, cephClusterObj *cephv1.CephCluster, k8sVersion string) *Cluster { + ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() + scheme := scheme.Scheme + err := policyv1.AddToScheme(scheme) + assert.NoError(t, err) + err = policyv1beta1.AddToScheme(scheme) + assert.NoError(t, err) + + cl := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects().Build() + clientset := test.New(t, 3) + clusterInfo := &cephclient.ClusterInfo{Namespace: mockNamespace, OwnerInfo: ownerInfo} + clusterInfo.SetName("test") + c := New(&clusterd.Context{Client: cl, Clientset: clientset}, clusterInfo, cephClusterObj.Spec, "myversion") + test.SetFakeKubernetesVersion(clientset, k8sVersion) + return c +} + +func TestReconcileMgrPDB(t *testing.T) { + testCases := struct { + name string + cephCluster *cephv1.CephCluster + expectedMaxUnAvailable int32 + errorExpected bool + }{ + name: "1 mgr", + cephCluster: &cephv1.CephCluster{ + ObjectMeta: metav1.ObjectMeta{Name: "rook", Namespace: mockNamespace}, + Spec: cephv1.ClusterSpec{ + Mgr: cephv1.MgrSpec{ + Count: 1, + }, + DisruptionManagement: cephv1.DisruptionManagementSpec{ + ManagePodBudgets: true, + }, + }, + }, + expectedMaxUnAvailable: 1, + errorExpected: false, + } + + // check for PDBV1Beta1 version + c := createFakeCluster(t, testCases.cephCluster, "v1.20.0") + err := c.reconcileMgrPDB() + assert.NoError(t, err) + existingPDBV1Beta1 := &policyv1beta1.PodDisruptionBudget{} + err = c.context.Client.Get(context.TODO(), types.NamespacedName{Name: mgrPDBName, Namespace: mockNamespace}, existingPDBV1Beta1) + if testCases.errorExpected { + assert.Error(t, err) + } + assert.NoError(t, err) + assert.Equalf(t, testCases.expectedMaxUnAvailable, int32(existingPDBV1Beta1.Spec.MaxUnavailable.IntValue()), "[%s]: incorrect minAvailable count in pdb", testCases.name) + + // check for PDBV1 version + c = createFakeCluster(t, testCases.cephCluster, "v1.21.0") + err = c.reconcileMgrPDB() + assert.NoError(t, err) + existingPDBV1 := &policyv1.PodDisruptionBudget{} + err = c.context.Client.Get(context.TODO(), types.NamespacedName{Name: mgrPDBName, Namespace: mockNamespace}, existingPDBV1) + if testCases.errorExpected { + assert.Error(t, err) + } + assert.NoError(t, err) + assert.Equalf(t, testCases.expectedMaxUnAvailable, int32(existingPDBV1.Spec.MaxUnavailable.IntValue()), "[%s]: incorrect minAvailable count in pdb", testCases.name) + + // reconcile mon PDB again to test update + err = c.reconcileMgrPDB() + assert.NoError(t, err) +} + +func TestDeleteMgrPDB(t *testing.T) { + // check for PDBV1 version + fakeNamespaceName := types.NamespacedName{Namespace: mockNamespace, Name: mgrPDBName} + c := createFakeCluster(t, &cephv1.CephCluster{ + Spec: cephv1.ClusterSpec{ + DisruptionManagement: cephv1.DisruptionManagementSpec{ + ManagePodBudgets: true, + }, + }, + }, "v1.21.0") + err := c.reconcileMgrPDB() + assert.NoError(t, err) + existingPDBV1 
:= &policyv1.PodDisruptionBudget{} + // mgr PDB exist + err = c.context.Client.Get(context.TODO(), fakeNamespaceName, existingPDBV1) + assert.NoError(t, err) + c.deleteMgrPDB() + // mgr PDB deleted + err = c.context.Client.Get(context.TODO(), fakeNamespaceName, existingPDBV1) + assert.Error(t, err) + + // check for PDBV1Beta1 version + c = createFakeCluster(t, &cephv1.CephCluster{ + Spec: cephv1.ClusterSpec{ + DisruptionManagement: cephv1.DisruptionManagementSpec{ + ManagePodBudgets: true, + }, + }, + }, "v1.20.0") + err = c.reconcileMgrPDB() + assert.NoError(t, err) + existingPDBV1Beta1 := &policyv1beta1.PodDisruptionBudget{} + // mgr PDB exist + err = c.context.Client.Get(context.TODO(), fakeNamespaceName, existingPDBV1Beta1) + assert.NoError(t, err) + c.deleteMgrPDB() + // mgr PDB deleted + err = c.context.Client.Get(context.TODO(), fakeNamespaceName, existingPDBV1Beta1) + assert.Error(t, err) +} diff --git a/pkg/operator/ceph/cluster/mgr/mgr.go b/pkg/operator/ceph/cluster/mgr/mgr.go index b925224feb96..9e0bf9292ab5 100644 --- a/pkg/operator/ceph/cluster/mgr/mgr.go +++ b/pkg/operator/ceph/cluster/mgr/mgr.go @@ -27,6 +27,7 @@ import ( "github.com/banzaicloud/k8s-objectmatcher/patch" "github.com/coreos/pkg/capnslog" "github.com/pkg/errors" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" "github.com/rook/rook/pkg/clusterd" cephclient "github.com/rook/rook/pkg/daemon/ceph/client" @@ -192,6 +193,14 @@ func (c *Cluster) Start() error { activeMgr = "" logger.Infof("cannot reconcile mgr services, no active mgr found. err=%v", err) } + + // reconcile mgr PDB + if err := c.reconcileMgrPDB(); err != nil { + return errors.Wrap(err, "failed to reconcile mgr PDB") + } + } else { + // delete MGR PDB as the count is less than 2 + c.deleteMgrPDB() } if activeMgr != "" { if err := c.reconcileServices(activeMgr); err != nil { @@ -356,14 +365,18 @@ func (c *Cluster) enableCrashModule() error { func (c *Cluster) enableBalancerModule() error { // The order MATTERS, always configure this module first, then turn it on - // This sets min compat client to luminous and the balancer module mode - err := cephclient.ConfigureBalancerModule(c.context, c.clusterInfo, balancerModuleMode) - if err != nil { - return errors.Wrapf(err, "failed to configure module %q", balancerModuleName) + // This enables the balancer module mode only in versions older than Pacific + // This lets the user change the default mode if desired + if !c.clusterInfo.CephVersion.IsAtLeastPacific() { + // This sets min compat client to luminous and the balancer module mode + err := cephclient.ConfigureBalancerModule(c.context, c.clusterInfo, balancerModuleMode) + if err != nil { + return errors.Wrapf(err, "failed to configure module %q", balancerModuleName) + } } // This turns "on" the balancer - err = cephclient.MgrEnableModule(c.context, c.clusterInfo, balancerModuleName, false) + err := cephclient.MgrEnableModule(c.context, c.clusterInfo, balancerModuleName, false) if err != nil { return errors.Wrapf(err, "failed to turn on mgr %q module", balancerModuleName) } @@ -456,7 +469,7 @@ func (c *Cluster) EnableServiceMonitor(activeDaemon string) error { } serviceMonitor.SetName(AppName) serviceMonitor.SetNamespace(c.clusterInfo.Namespace) - cephv1.GetMonitoringLabels(c.spec.Labels).ApplyToObjectMeta(&serviceMonitor.ObjectMeta) + cephv1.GetMonitoringLabels(c.spec.Labels).OverwriteApplyToObjectMeta(&serviceMonitor.ObjectMeta) if c.spec.External.Enable { 
serviceMonitor.Spec.Endpoints[0].Port = controller.ServiceExternalMetricName @@ -467,6 +480,9 @@ func (c *Cluster) EnableServiceMonitor(activeDaemon string) error { } serviceMonitor.Spec.NamespaceSelector.MatchNames = []string{c.clusterInfo.Namespace} serviceMonitor.Spec.Selector.MatchLabels = c.selectorLabels(activeDaemon) + + applyMonitoringLabels(c, serviceMonitor) + if _, err = k8sutil.CreateOrUpdateServiceMonitor(serviceMonitor); err != nil { return errors.Wrap(err, "service monitor could not be enabled") } @@ -489,7 +505,7 @@ func (c *Cluster) DeployPrometheusRule(name, namespace string) error { if err != nil { return errors.Wrapf(err, "failed to set owner reference to prometheus rule %q", prometheusRule.Name) } - cephv1.GetMonitoringLabels(c.spec.Labels).ApplyToObjectMeta(&prometheusRule.ObjectMeta) + cephv1.GetMonitoringLabels(c.spec.Labels).OverwriteApplyToObjectMeta(&prometheusRule.ObjectMeta) if _, err := k8sutil.CreateOrUpdatePrometheusRule(prometheusRule); err != nil { return errors.Wrap(err, "prometheus rule could not be deployed") } @@ -506,3 +522,24 @@ func IsModuleInSpec(modules []cephv1.Module, moduleName string) bool { return false } + +// ApplyMonitoringLabels function adds the name of the resource that manages +// cephcluster, as a label on the ceph metrics +func applyMonitoringLabels(c *Cluster, serviceMonitor *monitoringv1.ServiceMonitor) { + if c.spec.Labels != nil { + if monitoringLabels, ok := c.spec.Labels["monitoring"]; ok { + if managedBy, ok := monitoringLabels["rook.io/managedBy"]; ok { + relabelConfig := monitoringv1.RelabelConfig{ + TargetLabel: "managedBy", + Replacement: managedBy, + } + serviceMonitor.Spec.Endpoints[0].RelabelConfigs = append( + serviceMonitor.Spec.Endpoints[0].RelabelConfigs, &relabelConfig) + } else { + logger.Info("rook.io/managedBy not specified in monitoring labels") + } + } else { + logger.Info("monitoring labels not specified") + } + } +} diff --git a/pkg/operator/ceph/cluster/mgr/mgr_test.go b/pkg/operator/ceph/cluster/mgr/mgr_test.go index 61326b944de8..4d302d990705 100644 --- a/pkg/operator/ceph/cluster/mgr/mgr_test.go +++ b/pkg/operator/ceph/cluster/mgr/mgr_test.go @@ -22,13 +22,16 @@ import ( "io/ioutil" "os" "testing" + "time" + "github.com/pkg/errors" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" "github.com/rook/rook/pkg/apis/rook.io" + "github.com/rook/rook/pkg/client/clientset/versioned/scheme" "github.com/rook/rook/pkg/clusterd" cephclient "github.com/rook/rook/pkg/daemon/ceph/client" cephver "github.com/rook/rook/pkg/operator/ceph/version" - testopk8s "github.com/rook/rook/pkg/operator/k8sutil/test" testop "github.com/rook/rook/pkg/operator/test" exectest "github.com/rook/rook/pkg/util/exec/test" @@ -36,8 +39,11 @@ import ( "github.com/stretchr/testify/require" "github.com/tevino/abool" apps "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/api/errors" + policyv1 "k8s.io/api/policy/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" + kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client/fake" ) func TestStartMgr(t *testing.T) { @@ -60,12 +66,20 @@ func TestStartMgr(t *testing.T) { clientset := testop.New(t, 3) configDir, _ := ioutil.TempDir("", "") + scheme := scheme.Scheme + err := policyv1.AddToScheme(scheme) + assert.NoError(t, err) + err = policyv1beta1.AddToScheme(scheme) + assert.NoError(t, err) + cl := 
fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects().Build() + defer os.RemoveAll(configDir) ctx := &clusterd.Context{ Executor: executor, ConfigDir: configDir, Clientset: clientset, - RequestCancelOrchestration: abool.New()} + RequestCancelOrchestration: abool.New(), + Client: cl} ownerInfo := cephclient.NewMinimumOwnerInfo(t) clusterInfo := &cephclient.ClusterInfo{Namespace: "ns", FSID: "myfsid", OwnerInfo: ownerInfo, CephVersion: cephver.CephVersion{Major: 16, Minor: 2, Build: 5}} clusterInfo.SetName("test") @@ -81,7 +95,7 @@ func TestStartMgr(t *testing.T) { defer os.RemoveAll(c.spec.DataDirHostPath) // start a basic service - err := c.Start() + err = c.Start() assert.Nil(t, err) validateStart(t, c) assert.ElementsMatch(t, []string{}, testopk8s.DeploymentNamesUpdated(deploymentsUpdated)) @@ -160,7 +174,7 @@ func validateServices(t *testing.T, c *Cluster) { assert.Equal(t, ds.Spec.Ports[0].Port, int32(c.spec.Dashboard.Port)) } } else { - assert.True(t, errors.IsNotFound(err)) + assert.True(t, kerrors.IsNotFound(err)) } } @@ -216,7 +230,7 @@ func TestMgrSidecarReconcile(t *testing.T) { assert.True(t, calledMgrStat) assert.False(t, calledMgrDump) _, err = c.context.Clientset.CoreV1().Services(c.clusterInfo.Namespace).Get(context.TODO(), "rook-ceph-mgr", metav1.GetOptions{}) - assert.True(t, errors.IsNotFound(err)) + assert.True(t, kerrors.IsNotFound(err)) // nothing is updated when the requested mgr is not the active mgr activeMgr = "b" @@ -257,12 +271,15 @@ func TestConfigureModules(t *testing.T) { } lastModuleConfigured = args[3] } - if args[0] == "config" && args[1] == "set" && args[2] == "global" { - configSettings[args[3]] = args[4] - } } return "", nil //return "{\"key\":\"mysecurekey\"}", nil }, + MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, args ...string) (string, error) { + if args[0] == "config" && args[1] == "set" && args[2] == "global" { + configSettings[args[3]] = args[4] + } + return "", nil + }, } clientset := testop.New(t, 3) @@ -328,3 +345,94 @@ func TestMgrDaemons(t *testing.T) { assert.Equal(t, "a", daemons[0]) assert.Equal(t, "b", daemons[1]) } + +func TestApplyMonitoringLabels(t *testing.T) { + clusterSpec := cephv1.ClusterSpec{ + Labels: cephv1.LabelsSpec{}, + } + c := &Cluster{spec: clusterSpec} + sm := &monitoringv1.ServiceMonitor{Spec: monitoringv1.ServiceMonitorSpec{ + Endpoints: []monitoringv1.Endpoint{{}}}} + + // Service Monitor RelabelConfigs updated when 'rook.io/managedBy' monitoring label is found + monitoringLabels := cephv1.LabelsSpec{ + cephv1.KeyMonitoring: map[string]string{ + "rook.io/managedBy": "storagecluster"}, + } + c.spec.Labels = monitoringLabels + applyMonitoringLabels(c, sm) + fmt.Printf("Hello1") + assert.Equal(t, "managedBy", sm.Spec.Endpoints[0].RelabelConfigs[0].TargetLabel) + assert.Equal(t, "storagecluster", sm.Spec.Endpoints[0].RelabelConfigs[0].Replacement) + + // Service Monitor RelabelConfigs not updated when the required monitoring label is not found + monitoringLabels = cephv1.LabelsSpec{ + cephv1.KeyMonitoring: map[string]string{ + "wrongLabelKey": "storagecluster"}, + } + c.spec.Labels = monitoringLabels + sm.Spec.Endpoints[0].RelabelConfigs = nil + applyMonitoringLabels(c, sm) + assert.Nil(t, sm.Spec.Endpoints[0].RelabelConfigs) + + // Service Monitor RelabelConfigs not updated when no monitoring labels are found + c.spec.Labels = cephv1.LabelsSpec{} + sm.Spec.Endpoints[0].RelabelConfigs = nil + applyMonitoringLabels(c, sm) + assert.Nil(t, sm.Spec.Endpoints[0].RelabelConfigs) +} + 
+func TestCluster_enableBalancerModule(t *testing.T) { + c := &Cluster{ + context: &clusterd.Context{Executor: &exectest.MockExecutor{}, Clientset: testop.New(t, 3)}, + clusterInfo: cephclient.AdminClusterInfo("mycluster"), + } + + t.Run("on octopus we configure the balancer AND enable the upmap mode", func(t *testing.T) { + c.clusterInfo.CephVersion = cephver.Octopus + executor := &exectest.MockExecutor{ + MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { + logger.Infof("Command: %s %v", command, args) + if command == "ceph" { + if args[0] == "osd" && args[1] == "set-require-min-compat-client" { + return "", nil + } + if args[0] == "balancer" && args[1] == "mode" { + return "", nil + } + if args[0] == "balancer" && args[1] == "on" { + return "", nil + } + } + return "", errors.New("unknown command") + }, + } + c.context.Executor = executor + err := c.enableBalancerModule() + assert.NoError(t, err) + }) + + t.Run("on pacific we configure the balancer ONLY and don't set a mode", func(t *testing.T) { + c.clusterInfo.CephVersion = cephver.Pacific + executor := &exectest.MockExecutor{ + MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { + logger.Infof("Command: %s %v", command, args) + if command == "ceph" { + if args[0] == "osd" && args[1] == "set-require-min-compat-client" { + return "", nil + } + if args[0] == "balancer" && args[1] == "mode" { + return "", errors.New("balancer mode must not be set") + } + if args[0] == "balancer" && args[1] == "on" { + return "", nil + } + } + return "", errors.New("unknown command") + }, + } + c.context.Executor = executor + err := c.enableBalancerModule() + assert.NoError(t, err) + }) +} diff --git a/pkg/operator/ceph/cluster/mgr/orchestrator.go b/pkg/operator/ceph/cluster/mgr/orchestrator.go index 88fcf4de1241..42ce382e7009 100644 --- a/pkg/operator/ceph/cluster/mgr/orchestrator.go +++ b/pkg/operator/ceph/cluster/mgr/orchestrator.go @@ -54,7 +54,7 @@ func (c *Cluster) setRookOrchestratorBackend() error { // retry a few times in the case that the mgr module is not ready to accept commands _, err := client.ExecuteCephCommandWithRetry(func() (string, []byte, error) { args := []string{orchestratorCLIName, "set", "backend", "rook"} - output, err := client.NewCephCommand(c.context, c.clusterInfo, args).RunWithTimeout(exec.CephCommandTimeout) + output, err := client.NewCephCommand(c.context, c.clusterInfo, args).RunWithTimeout(exec.CephCommandsTimeout) return "set rook backend", output, err }, c.exitCode, 5, invalidArgErrorCode, orchestratorInitWaitTime) if err != nil { diff --git a/pkg/operator/ceph/cluster/mgr/orchestrator_test.go b/pkg/operator/ceph/cluster/mgr/orchestrator_test.go index 043c44ae0bae..66cf8ab5691e 100644 --- a/pkg/operator/ceph/cluster/mgr/orchestrator_test.go +++ b/pkg/operator/ceph/cluster/mgr/orchestrator_test.go @@ -23,6 +23,7 @@ import ( "github.com/rook/rook/pkg/clusterd" cephclient "github.com/rook/rook/pkg/daemon/ceph/client" cephver "github.com/rook/rook/pkg/operator/ceph/version" + "github.com/rook/rook/pkg/util/exec" exectest "github.com/rook/rook/pkg/util/exec/test" "github.com/stretchr/testify/assert" ) @@ -33,6 +34,7 @@ func TestOrchestratorModules(t *testing.T) { rookModuleEnabled := false rookBackendSet := false backendErrorCount := 0 + exec.CephCommandsTimeout = 15 * time.Second executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { logger.Infof("Command: %s %v", command, args) if args[0] == "mgr" && args[1] == 
"module" && args[2] == "enable" { diff --git a/pkg/operator/ceph/cluster/mgr/spec.go b/pkg/operator/ceph/cluster/mgr/spec.go index 9e211402652e..352f61ab3ced 100644 --- a/pkg/operator/ceph/cluster/mgr/spec.go +++ b/pkg/operator/ceph/cluster/mgr/spec.go @@ -33,7 +33,6 @@ import ( apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" ) const ( @@ -186,7 +185,7 @@ func (c *Cluster) makeMgrDaemonContainer(mgrConfig *mgrConfig) v1.Container { ), Resources: cephv1.GetMgrResources(c.spec.Resources), SecurityContext: controller.PodSecurityContext(), - LivenessProbe: getDefaultMgrLivenessProbe(), + LivenessProbe: controller.GenerateLivenessProbeExecDaemon(config.MgrType, mgrConfig.DaemonID), WorkingDir: config.VarLogCephDir, } @@ -254,18 +253,6 @@ func (c *Cluster) makeCmdProxySidecarContainer(mgrConfig *mgrConfig) v1.Containe return container } -func getDefaultMgrLivenessProbe() *v1.Probe { - return &v1.Probe{ - Handler: v1.Handler{ - HTTPGet: &v1.HTTPGetAction{ - Path: "/", - Port: intstr.FromInt(int(DefaultMetricsPort)), - }, - }, - InitialDelaySeconds: 60, - } -} - // MakeMetricsService generates the Kubernetes service object for the monitoring service func (c *Cluster) MakeMetricsService(name, activeDaemon, servicePortMetricName string) (*v1.Service, error) { labels := c.selectorLabels(activeDaemon) diff --git a/pkg/operator/ceph/cluster/mon/health.go b/pkg/operator/ceph/cluster/mon/health.go index 4deb8f9165eb..f535d1307d23 100644 --- a/pkg/operator/ceph/cluster/mon/health.go +++ b/pkg/operator/ceph/cluster/mon/health.go @@ -19,13 +19,16 @@ package mon import ( "context" "fmt" + "os" "strings" "time" "github.com/pkg/errors" + cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" cephclient "github.com/rook/rook/pkg/daemon/ceph/client" cephutil "github.com/rook/rook/pkg/daemon/ceph/util" "github.com/rook/rook/pkg/operator/ceph/controller" + "github.com/rook/rook/pkg/operator/ceph/version" "github.com/rook/rook/pkg/operator/k8sutil" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -42,6 +45,8 @@ var ( timeZero = time.Duration(0) // Check whether mons are on the same node once per operator restart since it's a rare scheduling condition needToCheckMonsOnSameNode = true + // Version of Ceph where the arbiter failover is supported + arbiterFailoverSupportedCephVersion = version.CephVersion{Major: 16, Minor: 2, Extra: 7} ) // HealthChecker aggregates the mon/cluster info needed to check the health of the monitors @@ -50,36 +55,69 @@ type HealthChecker struct { interval time.Duration } -// NewHealthChecker creates a new HealthChecker object -func NewHealthChecker(monCluster *Cluster) *HealthChecker { - h := &HealthChecker{ - monCluster: monCluster, - interval: HealthCheckInterval, - } - - monCRDTimeoutSetting := monCluster.spec.HealthCheck.DaemonHealth.Monitor.Timeout - if monCRDTimeoutSetting != "" { - if monTimeout, err := time.ParseDuration(monCRDTimeoutSetting); err == nil { - if monTimeout == timeZero { - logger.Warning("monitor failover is disabled") +func updateMonTimeout(monCluster *Cluster) { + // If the env was passed by the operator config, use that value + // This is an old behavior where we maintain backward compatibility + monTimeoutEnv := os.Getenv("ROOK_MON_OUT_TIMEOUT") + if monTimeoutEnv != "" { + parsedInterval, err := time.ParseDuration(monTimeoutEnv) + // We ignore the error here since the default is 10min and it's unlikely to be a problem + if err == 
nil { + MonOutTimeout = parsedInterval + } + // No env var, let's use the CR value if any + } else { + monCRDTimeoutSetting := monCluster.spec.HealthCheck.DaemonHealth.Monitor.Timeout + if monCRDTimeoutSetting != "" { + if monTimeout, err := time.ParseDuration(monCRDTimeoutSetting); err == nil { + if monTimeout == timeZero { + logger.Warning("monitor failover is disabled") + } + MonOutTimeout = monTimeout } - MonOutTimeout = monTimeout } } + // A third case is when the CRD is not set, in which case we use the default from MonOutTimeout +} - checkInterval := monCluster.spec.HealthCheck.DaemonHealth.Monitor.Interval - // allow overriding the check interval - if checkInterval != nil { - logger.Infof("ceph mon status in namespace %q check interval %q", monCluster.Namespace, checkInterval.Duration.String()) - h.interval = checkInterval.Duration +func updateMonInterval(monCluster *Cluster, h *HealthChecker) { + // If the env was passed by the operator config, use that value + // This is an old behavior where we maintain backward compatibility + healthCheckIntervalEnv := os.Getenv("ROOK_MON_HEALTHCHECK_INTERVAL") + if healthCheckIntervalEnv != "" { + parsedInterval, err := time.ParseDuration(healthCheckIntervalEnv) + // We ignore the error here since the default is 45s and it's unlikely to be a problem + if err == nil { + h.interval = parsedInterval + } + // No env var, let's use the CR value if any + } else { + checkInterval := monCluster.spec.HealthCheck.DaemonHealth.Monitor.Interval + // allow overriding the check interval + if checkInterval != nil { + logger.Debugf("ceph mon status in namespace %q check interval %q", monCluster.Namespace, checkInterval.Duration.String()) + h.interval = checkInterval.Duration + } } + // A third case is when the CRD is not set, in which case we use the default from HealthCheckInterval +} +// NewHealthChecker creates a new HealthChecker object +func NewHealthChecker(monCluster *Cluster) *HealthChecker { + h := &HealthChecker{ + monCluster: monCluster, + interval: HealthCheckInterval, + } return h } // Check periodically checks the health of the monitors func (hc *HealthChecker) Check(stopCh chan struct{}) { for { + // Update Mon Timeout with CR details + updateMonTimeout(hc.monCluster) + // Update Mon Interval with CR details + updateMonInterval(hc.monCluster, hc) select { case <-stopCh: logger.Infof("stopping monitoring of mons in namespace %q", hc.monCluster.Namespace) @@ -227,7 +265,10 @@ func (c *Cluster) checkHealth() error { retriesBeforeNodeDrainFailover = 1 logger.Warningf("mon %q NOT found in quorum and timeout exceeded, mon will be failed over", mon.Name) - c.failMon(len(quorumStatus.MonMap.Mons), desiredMonCount, mon.Name) + if !c.failMon(len(quorumStatus.MonMap.Mons), desiredMonCount, mon.Name) { + // The failover was skipped, so we continue to see if another mon needs to failover + continue + } // only deal with one unhealthy mon per health check return nil @@ -276,28 +317,55 @@ func (c *Cluster) checkHealth() error { } // failMon compares the monCount against desiredMonCount -func (c *Cluster) failMon(monCount, desiredMonCount int, name string) { +// Returns whether the failover request was attempted. If false, +// the operator should check for other mons to failover. +func (c *Cluster) failMon(monCount, desiredMonCount int, name string) bool { if monCount > desiredMonCount { // no need to create a new mon since we have an extra if err := c.removeMon(name); err != nil { logger.Errorf("failed to remove mon %q. 
%v", name, err) } - } else { - // prevent any voluntary mon drain while failing over - if err := c.blockMonDrain(types.NamespacedName{Name: monPDBName, Namespace: c.Namespace}); err != nil { - logger.Errorf("failed to block mon drain. %v", err) - } + return true + } - // bring up a new mon to replace the unhealthy mon - if err := c.failoverMon(name); err != nil { - logger.Errorf("failed to failover mon %q. %v", name, err) - } + if err := c.allowFailover(name); err != nil { + logger.Warningf("aborting mon %q failover. %v", name, err) + return false + } - // allow any voluntary mon drain after failover - if err := c.allowMonDrain(types.NamespacedName{Name: monPDBName, Namespace: c.Namespace}); err != nil { - logger.Errorf("failed to allow mon drain. %v", err) - } + // prevent any voluntary mon drain while failing over + if err := c.blockMonDrain(types.NamespacedName{Name: monPDBName, Namespace: c.Namespace}); err != nil { + logger.Errorf("failed to block mon drain. %v", err) } + + // bring up a new mon to replace the unhealthy mon + if err := c.failoverMon(name); err != nil { + logger.Errorf("failed to failover mon %q. %v", name, err) + } + + // allow any voluntary mon drain after failover + if err := c.allowMonDrain(types.NamespacedName{Name: monPDBName, Namespace: c.Namespace}); err != nil { + logger.Errorf("failed to allow mon drain. %v", err) + } + return true +} + +func (c *Cluster) allowFailover(name string) error { + if !c.spec.IsStretchCluster() { + // always failover if not a stretch cluster + return nil + } + if name != c.arbiterMon { + // failover if it's a non-arbiter + return nil + } + if c.ClusterInfo.CephVersion.IsAtLeast(arbiterFailoverSupportedCephVersion) { + // failover the arbiter if at least v16.2.7 + return nil + } + + // Ceph does not support updating the arbiter mon in older versions + return errors.Errorf("refusing to failover arbiter mon %q on a stretched cluster until upgrading to ceph version %s", name, arbiterFailoverSupportedCephVersion.String()) } func (c *Cluster) removeOrphanMonResources() { @@ -387,6 +455,9 @@ func (c *Cluster) failoverMon(name string) error { // remove the failed mon from a local list of the existing mons for finding a stretch zone existingMons := c.clusterInfoToMonConfig(name) + // Cache the name of the current arbiter in case it is updated during the failover + // This allows a simple check for updating the arbiter later in this method + currentArbiter := c.arbiterMon zone, err := c.findAvailableZoneIfStretched(existingMons) if err != nil { return errors.Wrap(err, "failed to find available stretch zone") @@ -425,12 +496,11 @@ func (c *Cluster) failoverMon(name string) error { } // Assign to a zone if a stretch cluster - if c.spec.IsStretchCluster() { - if name == c.arbiterMon { - // Update the arbiter mon for the stretch cluster if it changed - if err := c.ConfigureArbiter(); err != nil { - return errors.Wrap(err, "failed to configure stretch arbiter") - } + if c.spec.IsStretchCluster() && name == currentArbiter { + // Update the arbiter mon for the stretch cluster if it changed + failingOver := true + if err := c.ConfigureArbiter(failingOver); err != nil { + return errors.Wrap(err, "failed to configure stretch arbiter") } } @@ -490,6 +560,12 @@ func (c *Cluster) removeMon(daemonName string) error { return errors.Wrapf(err, "failed to save mon config after failing over mon %s", daemonName) } + // Update cluster-wide RBD bootstrap peer token since Monitors have changed + _, err := controller.CreateBootstrapPeerSecret(c.context, 
c.ClusterInfo, &cephv1.CephCluster{ObjectMeta: metav1.ObjectMeta{Name: c.ClusterInfo.NamespacedName().Name, Namespace: c.Namespace}}, c.ownerInfo) + if err != nil { + return errors.Wrap(err, "failed to update cluster rbd bootstrap peer token") + } + return nil } @@ -541,10 +617,6 @@ func (c *Cluster) addOrRemoveExternalMonitor(status cephclient.MonStatusResponse logger.Debugf("ClusterInfo is now Empty, refilling it from status.MonMap.Mons") monCount := len(status.MonMap.Mons) - if monCount%2 == 0 { - logger.Warningf("external cluster mon count is even (%d), should be uneven, continuing.", monCount) - } - if monCount == 1 { logger.Warning("external cluster mon count is 1, consider adding new monitors.") } diff --git a/pkg/operator/ceph/cluster/mon/health_test.go b/pkg/operator/ceph/cluster/mon/health_test.go index 730eede7a3c6..d11e6faf92ff 100644 --- a/pkg/operator/ceph/cluster/mon/health_test.go +++ b/pkg/operator/ceph/cluster/mon/health_test.go @@ -31,6 +31,7 @@ import ( cephclient "github.com/rook/rook/pkg/daemon/ceph/client" clienttest "github.com/rook/rook/pkg/daemon/ceph/client/test" "github.com/rook/rook/pkg/operator/ceph/config" + "github.com/rook/rook/pkg/operator/ceph/version" testopk8s "github.com/rook/rook/pkg/operator/k8sutil/test" "github.com/rook/rook/pkg/operator/test" exectest "github.com/rook/rook/pkg/util/exec/test" @@ -50,6 +51,10 @@ func TestCheckHealth(t *testing.T) { executor := &exectest.MockExecutor{ MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { + logger.Infof("executing command: %s %+v", command, args) + if args[0] == "auth" && args[1] == "get-or-create-key" { + return "{\"key\":\"mysecurekey\"}", nil + } return clienttest.MonInQuorumResponse(), nil }, } @@ -150,12 +155,52 @@ func TestCheckHealth(t *testing.T) { } } +func TestSkipMonFailover(t *testing.T) { + c := New(&clusterd.Context{}, "ns", cephv1.ClusterSpec{}, nil, nil) + c.ClusterInfo = clienttest.CreateTestClusterInfo(1) + monName := "arb" + + t.Run("don't skip failover for non-stretch", func(t *testing.T) { + assert.NoError(t, c.allowFailover(monName)) + }) + + t.Run("don't skip failover for non-arbiter", func(t *testing.T) { + c.spec.Mon.Count = 5 + c.spec.Mon.StretchCluster = &cephv1.StretchClusterSpec{ + Zones: []cephv1.StretchClusterZoneSpec{ + {Name: "a"}, + {Name: "b"}, + {Name: "c", Arbiter: true}, + }, + } + + assert.NoError(t, c.allowFailover(monName)) + }) + + t.Run("skip failover for arbiter if an older version of ceph", func(t *testing.T) { + c.arbiterMon = monName + c.ClusterInfo.CephVersion = version.CephVersion{Major: 16, Minor: 2, Extra: 6} + assert.Error(t, c.allowFailover(monName)) + }) + + t.Run("don't skip failover for arbiter if a newer version of ceph", func(t *testing.T) { + c.ClusterInfo.CephVersion = version.CephVersion{Major: 16, Minor: 2, Extra: 7} + assert.NoError(t, c.allowFailover(monName)) + }) +} + func TestEvictMonOnSameNode(t *testing.T) { ctx := context.TODO() clientset := test.New(t, 1) configDir, _ := ioutil.TempDir("", "") defer os.RemoveAll(configDir) - context := &clusterd.Context{Clientset: clientset, ConfigDir: configDir, Executor: &exectest.MockExecutor{}, RequestCancelOrchestration: abool.New()} + executor := &exectest.MockExecutor{ + MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { + logger.Infof("executing command: %s %+v", command, args) + return "{\"key\":\"mysecurekey\"}", nil + }, + } + context := &clusterd.Context{Clientset: clientset, ConfigDir: configDir, Executor: executor, 
RequestCancelOrchestration: abool.New()} ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() c := New(context, "ns", cephv1.ClusterSpec{}, ownerInfo, &sync.Mutex{}) setCommonMonProperties(c, 1, cephv1.MonSpec{Count: 0}, "myversion") @@ -246,6 +291,10 @@ func TestCheckHealthNotFound(t *testing.T) { executor := &exectest.MockExecutor{ MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { + logger.Infof("executing command: %s %+v", command, args) + if args[0] == "auth" && args[1] == "get-or-create-key" { + return "{\"key\":\"mysecurekey\"}", nil + } return clienttest.MonInQuorumResponse(), nil }, } @@ -305,6 +354,10 @@ func TestAddRemoveMons(t *testing.T) { monQuorumResponse := clienttest.MonInQuorumResponse() executor := &exectest.MockExecutor{ MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { + logger.Infof("executing command: %s %+v", command, args) + if args[0] == "auth" && args[1] == "get-or-create-key" { + return "{\"key\":\"mysecurekey\"}", nil + } return monQuorumResponse, nil }, } @@ -440,25 +493,65 @@ func TestAddOrRemoveExternalMonitor(t *testing.T) { func TestNewHealthChecker(t *testing.T) { c := &Cluster{spec: cephv1.ClusterSpec{HealthCheck: cephv1.CephClusterHealthCheckSpec{}}} - time10s, _ := time.ParseDuration("10s") - c10s := &Cluster{spec: cephv1.ClusterSpec{HealthCheck: cephv1.CephClusterHealthCheckSpec{DaemonHealth: cephv1.DaemonHealthSpec{Monitor: cephv1.HealthCheckSpec{Interval: &metav1.Duration{Duration: time10s}}}}}} type args struct { monCluster *Cluster } - tests := []struct { + tests := struct { name string args args want *HealthChecker }{ - {"default-interval", args{c}, &HealthChecker{c, HealthCheckInterval}}, - {"10s-interval", args{c10s}, &HealthChecker{c10s, time10s}}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := NewHealthChecker(tt.args.monCluster); !reflect.DeepEqual(got, tt.want) { - t.Errorf("NewHealthChecker() = %v, want %v", got, tt.want) - } - }) + "default-interval", args{c}, &HealthChecker{c, HealthCheckInterval}, } + t.Run(tests.name, func(t *testing.T) { + if got := NewHealthChecker(tests.args.monCluster); !reflect.DeepEqual(got, tests.want) { + t.Errorf("NewHealthChecker() = %v, want %v", got, tests.want) + } + }) +} + +func TestUpdateMonTimeout(t *testing.T) { + t.Run("using default mon timeout", func(t *testing.T) { + m := &Cluster{} + updateMonTimeout(m) + assert.Equal(t, time.Minute*10, MonOutTimeout) + }) + t.Run("using env var mon timeout", func(t *testing.T) { + os.Setenv("ROOK_MON_OUT_TIMEOUT", "10s") + defer os.Unsetenv("ROOK_MON_OUT_TIMEOUT") + m := &Cluster{} + updateMonTimeout(m) + assert.Equal(t, time.Second*10, MonOutTimeout) + }) + t.Run("using spec mon timeout", func(t *testing.T) { + m := &Cluster{spec: cephv1.ClusterSpec{HealthCheck: cephv1.CephClusterHealthCheckSpec{DaemonHealth: cephv1.DaemonHealthSpec{Monitor: cephv1.HealthCheckSpec{Timeout: "1m"}}}}} + updateMonTimeout(m) + assert.Equal(t, time.Minute, MonOutTimeout) + }) +} + +func TestUpdateMonInterval(t *testing.T) { + t.Run("using default mon interval", func(t *testing.T) { + m := &Cluster{} + h := &HealthChecker{m, HealthCheckInterval} + updateMonInterval(m, h) + assert.Equal(t, time.Second*45, HealthCheckInterval) + }) + t.Run("using env var mon timeout", func(t *testing.T) { + os.Setenv("ROOK_MON_HEALTHCHECK_INTERVAL", "10s") + defer os.Unsetenv("ROOK_MON_HEALTHCHECK_INTERVAL") + m := &Cluster{} + h := &HealthChecker{m, HealthCheckInterval} + updateMonInterval(m, 
h) + assert.Equal(t, time.Second*10, h.interval) + }) + t.Run("using spec mon timeout", func(t *testing.T) { + tm, err := time.ParseDuration("1m") + assert.NoError(t, err) + m := &Cluster{spec: cephv1.ClusterSpec{HealthCheck: cephv1.CephClusterHealthCheckSpec{DaemonHealth: cephv1.DaemonHealthSpec{Monitor: cephv1.HealthCheckSpec{Interval: &metav1.Duration{Duration: tm}}}}}} + h := &HealthChecker{m, HealthCheckInterval} + updateMonInterval(m, h) + assert.Equal(t, time.Minute, h.interval) + }) } diff --git a/pkg/operator/ceph/cluster/mon/mon.go b/pkg/operator/ceph/cluster/mon/mon.go index c6c6566b7cf1..8ffa85c62b80 100644 --- a/pkg/operator/ceph/cluster/mon/mon.go +++ b/pkg/operator/ceph/cluster/mon/mon.go @@ -338,11 +338,20 @@ func (c *Cluster) isArbiterZone(zone string) bool { return c.getArbiterZone() == zone } -func (c *Cluster) ConfigureArbiter() error { +func (c *Cluster) ConfigureArbiter(failingOver bool) error { if c.arbiterMon == "" { return errors.New("arbiter not specified for the stretch cluster") } + failureDomain := c.stretchFailureDomainName() + if failingOver { + // Set the new mon tiebreaker + if err := cephclient.SetNewTiebreaker(c.context, c.ClusterInfo, c.arbiterMon); err != nil { + return errors.Wrap(err, "failed to set new mon tiebreaker") + } + return nil + } + monDump, err := cephclient.GetMonDump(c.context, c.ClusterInfo) if err != nil { logger.Warningf("attempting to enable arbiter after failed to detect if already enabled. %v", err) @@ -354,7 +363,6 @@ func (c *Cluster) ConfigureArbiter() error { // Wait for the CRUSH map to have at least two zones // The timeout is relatively short since the operator will requeue the reconcile // and try again at a higher level if not yet found - failureDomain := c.stretchFailureDomainName() logger.Infof("enabling stretch mode... 
waiting for two failure domains of type %q to be found in the CRUSH map after OSD initialization", failureDomain) pollInterval := 5 * time.Second totalWaitTime := 2 * time.Minute diff --git a/pkg/operator/ceph/cluster/mon/node.go b/pkg/operator/ceph/cluster/mon/node.go index 595b37e25386..7a231e372f64 100644 --- a/pkg/operator/ceph/cluster/mon/node.go +++ b/pkg/operator/ceph/cluster/mon/node.go @@ -34,8 +34,20 @@ func getNodeInfoFromNode(n v1.Node) (*MonScheduleInfo, error) { break } } + + // If no internal IP found try to use an external IP + if nr.Address == "" { + for _, ip := range n.Status.Addresses { + if ip.Type == v1.NodeExternalIP { + logger.Debugf("using external IP %s for node %s", ip.Address, n.Name) + nr.Address = ip.Address + break + } + } + } + if nr.Address == "" { - return nil, errors.Errorf("failed to find any internal IP on node %s", nr.Name) + return nil, errors.Errorf("failed to find any IP on node %s", nr.Name) } return nr, nil } diff --git a/pkg/operator/ceph/cluster/mon/node_test.go b/pkg/operator/ceph/cluster/mon/node_test.go index ec8eeb296e25..73257797fa22 100644 --- a/pkg/operator/ceph/cluster/mon/node_test.go +++ b/pkg/operator/ceph/cluster/mon/node_test.go @@ -23,7 +23,7 @@ import ( "testing" cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" - "github.com/rook/rook/pkg/apis/rook.io" + rook "github.com/rook/rook/pkg/apis/rook.io" "github.com/rook/rook/pkg/clusterd" clienttest "github.com/rook/rook/pkg/daemon/ceph/client/test" cephver "github.com/rook/rook/pkg/operator/ceph/version" @@ -214,20 +214,35 @@ func TestGetNodeInfoFromNode(t *testing.T) { assert.NotNil(t, node) node.Status = v1.NodeStatus{} + node.Status.Addresses = []v1.NodeAddress{} + + var info *MonScheduleInfo + _, err = getNodeInfoFromNode(*node) + assert.NotNil(t, err) + + // With internalIP and externalIP node.Status.Addresses = []v1.NodeAddress{ { Type: v1.NodeExternalIP, Address: "1.1.1.1", }, + { + Type: v1.NodeInternalIP, + Address: "172.17.0.1", + }, } + info, err = getNodeInfoFromNode(*node) + assert.NoError(t, err) + assert.Equal(t, "172.17.0.1", info.Address) // Must return the internalIP - var info *MonScheduleInfo - _, err = getNodeInfoFromNode(*node) - assert.NotNil(t, err) - - node.Status.Addresses[0].Type = v1.NodeInternalIP - node.Status.Addresses[0].Address = "172.17.0.1" + // With externalIP only + node.Status.Addresses = []v1.NodeAddress{ + { + Type: v1.NodeExternalIP, + Address: "1.2.3.4", + }, + } info, err = getNodeInfoFromNode(*node) assert.NoError(t, err) - assert.Equal(t, "172.17.0.1", info.Address) + assert.Equal(t, "1.2.3.4", info.Address) } diff --git a/pkg/operator/ceph/cluster/mon/spec.go b/pkg/operator/ceph/cluster/mon/spec.go index 5bdbbc91e417..45541e2da26e 100644 --- a/pkg/operator/ceph/cluster/mon/spec.go +++ b/pkg/operator/ceph/cluster/mon/spec.go @@ -370,7 +370,7 @@ func UpdateCephDeploymentAndWait(context *clusterd.Context, clusterInfo *client. 
err := client.OkToContinue(context, clusterInfo, deployment.Name, daemonType, daemonName) if err != nil { if continueUpgradeAfterChecksEvenIfNotHealthy { - logger.Infof("The %s daemon %s is not ok-to-stop but 'continueUpgradeAfterChecksEvenIfNotHealthy' is true, so continuing...", daemonType, daemonName) + logger.Infof("The %s daemon %s is not ok-to-continue but 'continueUpgradeAfterChecksEvenIfNotHealthy' is true, so continuing...", daemonType, daemonName) return nil } return errors.Wrapf(err, "failed to check if we can %s the deployment %s", action, deployment.Name) diff --git a/pkg/operator/ceph/cluster/osd/create.go b/pkg/operator/ceph/cluster/osd/create.go index 30b200a1ffa2..3ec8ff7e413f 100644 --- a/pkg/operator/ceph/cluster/osd/create.go +++ b/pkg/operator/ceph/cluster/osd/create.go @@ -25,17 +25,16 @@ import ( osdconfig "github.com/rook/rook/pkg/operator/ceph/cluster/osd/config" opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/util" v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/version" ) type createConfig struct { cluster *Cluster provisionConfig *provisionConfig - awaitingStatusConfigMaps *util.Set // These status configmaps were created for OSD prepare jobs - finishedStatusConfigMaps *util.Set // Status configmaps are added here as provisioning is completed for them + awaitingStatusConfigMaps sets.String // These status configmaps were created for OSD prepare jobs + finishedStatusConfigMaps sets.String // Status configmaps are added here as provisioning is completed for them deployments *existenceList // these OSDs have existing deployments } @@ -49,27 +48,27 @@ var ( func (c *Cluster) newCreateConfig( provisionConfig *provisionConfig, - awaitingStatusConfigMaps *util.Set, + awaitingStatusConfigMaps sets.String, deployments *existenceList, ) *createConfig { if awaitingStatusConfigMaps == nil { - awaitingStatusConfigMaps = util.NewSet() + awaitingStatusConfigMaps = sets.NewString() } return &createConfig{ c, provisionConfig, awaitingStatusConfigMaps, - util.NewSet(), + sets.NewString(), deployments, } } func (c *createConfig) progress() (completed, initial int) { - return c.finishedStatusConfigMaps.Count(), c.awaitingStatusConfigMaps.Count() + return c.finishedStatusConfigMaps.Len(), c.awaitingStatusConfigMaps.Len() } func (c *createConfig) doneCreating() bool { - return c.awaitingStatusConfigMaps.Count() == c.finishedStatusConfigMaps.Count() + return c.awaitingStatusConfigMaps.Len() == c.finishedStatusConfigMaps.Len() } func (c *createConfig) createNewOSDsFromStatus( @@ -77,13 +76,13 @@ func (c *createConfig) createNewOSDsFromStatus( nodeOrPVCName string, errs *provisionErrors, ) { - if !c.awaitingStatusConfigMaps.Contains(statusConfigMapName(nodeOrPVCName)) { + if !c.awaitingStatusConfigMaps.Has(statusConfigMapName(nodeOrPVCName)) { // If there is a dangling OSD prepare configmap from another reconcile, don't process it logger.Infof("not creating deployments for OSD prepare results found in ConfigMap %q which was not created for the latest storage spec", statusConfigMapName(nodeOrPVCName)) return } - if c.finishedStatusConfigMaps.Contains(statusConfigMapName(nodeOrPVCName)) { + if c.finishedStatusConfigMaps.Has(statusConfigMapName(nodeOrPVCName)) { // If we have already processed this configmap, don't process it again logger.Infof("not creating deployments for OSD prepare results found in ConfigMap 
%q which was already processed", statusConfigMapName(nodeOrPVCName)) return @@ -115,7 +114,7 @@ func (c *createConfig) createNewOSDsFromStatus( // Call this if createNewOSDsFromStatus() isn't going to be called (like for a failed status) func (c *createConfig) doneWithStatus(nodeOrPVCName string) { - c.finishedStatusConfigMaps.Add(statusConfigMapName(nodeOrPVCName)) + c.finishedStatusConfigMaps.Insert(statusConfigMapName(nodeOrPVCName)) } // Returns a set of all the awaitingStatusConfigMaps that will be updated by provisioning jobs. @@ -124,34 +123,34 @@ func (c *createConfig) doneWithStatus(nodeOrPVCName string) { // // Creation of prepare jobs is most directly related to creating new OSDs. And we want to keep all // usage of awaitingStatusConfigMaps in this file. -func (c *Cluster) startProvisioningOverPVCs(config *provisionConfig, errs *provisionErrors) (*util.Set, error) { +func (c *Cluster) startProvisioningOverPVCs(config *provisionConfig, errs *provisionErrors) (sets.String, error) { // Parsing storageClassDeviceSets and parsing it to volume sources c.prepareStorageClassDeviceSets(errs) // no valid VolumeSource is ready to run an osd if len(c.deviceSets) == 0 { logger.Info("no storageClassDeviceSets defined to configure OSDs on PVCs") - return util.NewSet(), nil + return sets.NewString(), nil } // Check k8s version k8sVersion, err := k8sutil.GetK8SVersion(c.context.Clientset) if err != nil { errs.addError("failed to provision OSDs on PVCs. user has specified storageClassDeviceSets, but the Kubernetes version could not be determined. minimum Kubernetes version required: 1.13.0. %v", err) - return util.NewSet(), nil + return sets.NewString(), nil } if !k8sVersion.AtLeast(version.MustParseSemantic("v1.13.0")) { errs.addError("failed to provision OSDs on PVCs. user has specified storageClassDeviceSets, but the Kubernetes version is not supported. user must update Kubernetes version. minimum Kubernetes version required: 1.13.0. version detected: %s", k8sVersion.String()) - return util.NewSet(), nil + return sets.NewString(), nil } existingDeployments, err := c.getExistingOSDDeploymentsOnPVCs() if err != nil { errs.addError("failed to provision OSDs on PVCs. failed to query existing OSD deployments on PVCs. %v", err) - return util.NewSet(), nil + return sets.NewString(), nil } - awaitingStatusConfigMaps := util.NewSet() + awaitingStatusConfigMaps := sets.NewString() for _, volume := range c.deviceSets { // Check whether we need to cancel the orchestration if err := opcontroller.CheckForCancelledOrchestration(c.context); err != nil { @@ -233,7 +232,7 @@ func (c *Cluster) startProvisioningOverPVCs(config *provisionConfig, errs *provi } // Skip OSD prepare if deployment already exists for the PVC - if existingDeployments.Contains(dataSource.ClaimName) { + if existingDeployments.Has(dataSource.ClaimName) { logger.Debugf("skipping OSD prepare job creation for PVC %q because OSD daemon using the PVC already exists", osdProps.crushHostname) continue } @@ -251,7 +250,7 @@ func (c *Cluster) startProvisioningOverPVCs(config *provisionConfig, errs *provi // record the name of the status configmap that will eventually receive results from the // OSD provisioning job we just created. This will help us determine when we are done // processing the results of provisioning jobs. 
- awaitingStatusConfigMaps.Add(cmName) + awaitingStatusConfigMaps.Insert(cmName) } return awaitingStatusConfigMaps, nil @@ -263,10 +262,10 @@ func (c *Cluster) startProvisioningOverPVCs(config *provisionConfig, errs *provi // // Creation of prepare jobs is most directly related to creating new OSDs. And we want to keep all // usage of awaitingStatusConfigMaps in this file. -func (c *Cluster) startProvisioningOverNodes(config *provisionConfig, errs *provisionErrors) (*util.Set, error) { +func (c *Cluster) startProvisioningOverNodes(config *provisionConfig, errs *provisionErrors) (sets.String, error) { if !c.spec.Storage.UseAllNodes && len(c.spec.Storage.Nodes) == 0 { logger.Info("no nodes are defined for configuring OSDs on raw devices") - return util.NewSet(), nil + return sets.NewString(), nil } if c.spec.Storage.UseAllNodes { @@ -278,7 +277,7 @@ func (c *Cluster) startProvisioningOverNodes(config *provisionConfig, errs *prov hostnameMap, err := k8sutil.GetNodeHostNames(c.context.Clientset) if err != nil { errs.addError("failed to provision OSDs on nodes. failed to get node hostnames. %v", err) - return util.NewSet(), nil + return sets.NewString(), nil } c.spec.Storage.Nodes = nil for _, hostname := range hostnameMap { @@ -300,15 +299,15 @@ func (c *Cluster) startProvisioningOverNodes(config *provisionConfig, errs *prov // no valid node is ready to run an osd if len(validNodes) == 0 { logger.Warningf("no valid nodes available to run osds on nodes in namespace %q", c.clusterInfo.Namespace) - return util.NewSet(), nil + return sets.NewString(), nil } if len(c.spec.DataDirHostPath) == 0 { errs.addError("failed to provision OSDs on nodes. user has specified valid nodes for storage, but dataDirHostPath is empty. user must set CephCluster dataDirHostPath") - return util.NewSet(), nil + return sets.NewString(), nil } - awaitingStatusConfigMaps := util.NewSet() + awaitingStatusConfigMaps := sets.NewString() for _, node := range c.ValidStorage.Nodes { // Check whether we need to cancel the orchestration if err := opcontroller.CheckForCancelledOrchestration(c.context); err != nil { @@ -353,7 +352,7 @@ func (c *Cluster) startProvisioningOverNodes(config *provisionConfig, errs *prov // record the name of the status configmap that will eventually receive results from the // OSD provisioning job we just created. This will help us determine when we are done // processing the results of provisioning jobs. 
- awaitingStatusConfigMaps.Add(cmName) + awaitingStatusConfigMaps.Insert(cmName) } return awaitingStatusConfigMaps, nil @@ -372,11 +371,7 @@ func (c *Cluster) runPrepareJob(osdProps *osdProperties, config *provisionConfig } if err := k8sutil.RunReplaceableJob(c.context.Clientset, job, false); err != nil { - if !kerrors.IsAlreadyExists(err) { - return errors.Wrapf(err, "failed to run provisioning job for %s %q", nodeOrPVC, nodeOrPVCName) - } - logger.Infof("letting preexisting OSD provisioning job run to completion for %s %q", nodeOrPVC, nodeOrPVCName) - return nil + return errors.Wrapf(err, "failed to run osd provisioning job for %s %q", nodeOrPVC, nodeOrPVCName) } logger.Infof("started OSD provisioning job for %s %q", nodeOrPVC, nodeOrPVCName) diff --git a/pkg/operator/ceph/cluster/osd/create_test.go b/pkg/operator/ceph/cluster/osd/create_test.go index e79938b1b651..5272da8eab70 100644 --- a/pkg/operator/ceph/cluster/osd/create_test.go +++ b/pkg/operator/ceph/cluster/osd/create_test.go @@ -27,7 +27,6 @@ import ( cephclient "github.com/rook/rook/pkg/daemon/ceph/client" cephver "github.com/rook/rook/pkg/operator/ceph/version" "github.com/rook/rook/pkg/operator/test" - "github.com/rook/rook/pkg/util" "github.com/stretchr/testify/assert" "github.com/tevino/abool" corev1 "k8s.io/api/core/v1" @@ -35,6 +34,7 @@ import ( apiresource "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/kubernetes/fake" k8stesting "k8s.io/client-go/testing" ) @@ -93,7 +93,7 @@ func Test_createNewOSDsFromStatus(t *testing.T) { spec := cephv1.ClusterSpec{} var status *OrchestrationStatus - awaitingStatusConfigMaps := util.NewSet() + awaitingStatusConfigMaps := sets.NewString() var c *Cluster var createConfig *createConfig @@ -102,10 +102,10 @@ func Test_createNewOSDsFromStatus(t *testing.T) { // none of this code should ever add or remove deployments from the existence list assert.Equal(t, 3, deployments.Len()) // Simulate environment where provision jobs were created for node0, node2, pvc1, and pvc2 - awaitingStatusConfigMaps = util.NewSet() - awaitingStatusConfigMaps.AddMultiple([]string{ + awaitingStatusConfigMaps = sets.NewString() + awaitingStatusConfigMaps.Insert( statusNameNode0, statusNameNode2, - statusNamePVC1, statusNamePVC2}) + statusNamePVC1, statusNamePVC2) createCallsOnNode = createCallsOnNode[:0] createCallsOnPVC = createCallsOnPVC[:0] errs = newProvisionErrors() @@ -128,9 +128,9 @@ func Test_createNewOSDsFromStatus(t *testing.T) { assert.Len(t, createCallsOnNode, 0) assert.Len(t, createCallsOnPVC, 0) // status map should have been marked completed - assert.Equal(t, 4, awaitingStatusConfigMaps.Count()) - assert.Equal(t, 1, createConfig.finishedStatusConfigMaps.Count()) - assert.True(t, createConfig.finishedStatusConfigMaps.Contains(statusNameNode0)) + assert.Equal(t, 4, awaitingStatusConfigMaps.Len()) + assert.Equal(t, 1, createConfig.finishedStatusConfigMaps.Len()) + assert.True(t, createConfig.finishedStatusConfigMaps.Has(statusNameNode0)) }) t.Run("test: node: create all OSDs on node when all do not exist", func(t *testing.T) { @@ -146,9 +146,9 @@ func Test_createNewOSDsFromStatus(t *testing.T) { assert.ElementsMatch(t, createCallsOnNode, []int{0, 1, 2}) assert.Len(t, createCallsOnPVC, 0) // status map should have been marked completed - assert.Equal(t, 4, awaitingStatusConfigMaps.Count()) - assert.Equal(t, 1, createConfig.finishedStatusConfigMaps.Count()) - assert.True(t, 
createConfig.finishedStatusConfigMaps.Contains(statusNameNode2)) + assert.Equal(t, 4, awaitingStatusConfigMaps.Len()) + assert.Equal(t, 1, createConfig.finishedStatusConfigMaps.Len()) + assert.True(t, createConfig.finishedStatusConfigMaps.Has(statusNameNode2)) }) t.Run("node: create only nonexistent OSDs on node when some already exist", func(t *testing.T) { @@ -167,9 +167,9 @@ func Test_createNewOSDsFromStatus(t *testing.T) { assert.ElementsMatch(t, createCallsOnNode, []int{5, 7}) assert.Len(t, createCallsOnPVC, 0) // status map should have been marked completed - assert.Equal(t, 4, awaitingStatusConfigMaps.Count()) - assert.Equal(t, 1, createConfig.finishedStatusConfigMaps.Count()) - assert.True(t, createConfig.finishedStatusConfigMaps.Contains(statusNameNode0)) + assert.Equal(t, 4, awaitingStatusConfigMaps.Len()) + assert.Equal(t, 1, createConfig.finishedStatusConfigMaps.Len()) + assert.True(t, createConfig.finishedStatusConfigMaps.Has(statusNameNode0)) }) t.Run("node: skip creating OSDs for status configmaps that weren't created for this reconcile", func(t *testing.T) { @@ -185,8 +185,8 @@ func Test_createNewOSDsFromStatus(t *testing.T) { assert.ElementsMatch(t, createCallsOnNode, []int{}) assert.Len(t, createCallsOnPVC, 0) // status map should NOT have been marked completed - assert.Equal(t, 4, awaitingStatusConfigMaps.Count()) - assert.Equal(t, 0, createConfig.finishedStatusConfigMaps.Count()) + assert.Equal(t, 4, awaitingStatusConfigMaps.Len()) + assert.Equal(t, 0, createConfig.finishedStatusConfigMaps.Len()) }) t.Run("node: errors reported if OSDs fail to create", func(t *testing.T) { @@ -203,9 +203,9 @@ func Test_createNewOSDsFromStatus(t *testing.T) { assert.ElementsMatch(t, createCallsOnNode, []int{0, 1, 2}) assert.Len(t, createCallsOnPVC, 0) // status map should have been marked completed - assert.Equal(t, 4, awaitingStatusConfigMaps.Count()) - assert.Equal(t, 1, createConfig.finishedStatusConfigMaps.Count()) - assert.True(t, createConfig.finishedStatusConfigMaps.Contains(statusNameNode0)) + assert.Equal(t, 4, awaitingStatusConfigMaps.Len()) + assert.Equal(t, 1, createConfig.finishedStatusConfigMaps.Len()) + assert.True(t, createConfig.finishedStatusConfigMaps.Has(statusNameNode0)) induceFailureCreatingOSD = -1 // off }) @@ -220,9 +220,9 @@ func Test_createNewOSDsFromStatus(t *testing.T) { assert.Len(t, createCallsOnNode, 0) assert.Len(t, createCallsOnPVC, 0) // status map should have been marked completed - assert.Equal(t, 4, awaitingStatusConfigMaps.Count()) - assert.Equal(t, 1, createConfig.finishedStatusConfigMaps.Count()) - assert.True(t, createConfig.finishedStatusConfigMaps.Contains(statusNamePVC1)) + assert.Equal(t, 4, awaitingStatusConfigMaps.Len()) + assert.Equal(t, 1, createConfig.finishedStatusConfigMaps.Len()) + assert.True(t, createConfig.finishedStatusConfigMaps.Has(statusNamePVC1)) }) t.Run("pvc: create all OSDs on pvc when all do not exist", func(t *testing.T) { @@ -238,9 +238,9 @@ func Test_createNewOSDsFromStatus(t *testing.T) { assert.ElementsMatch(t, createCallsOnPVC, []int{0, 1, 2}) assert.Len(t, createCallsOnNode, 0) // status map should have been marked completed - assert.Equal(t, 4, awaitingStatusConfigMaps.Count()) - assert.Equal(t, 1, createConfig.finishedStatusConfigMaps.Count()) - assert.True(t, createConfig.finishedStatusConfigMaps.Contains(statusNamePVC2)) + assert.Equal(t, 4, awaitingStatusConfigMaps.Len()) + assert.Equal(t, 1, createConfig.finishedStatusConfigMaps.Len()) + assert.True(t, createConfig.finishedStatusConfigMaps.Has(statusNamePVC2)) 
}) t.Run("pvc: create only nonexistent OSDs on pvc when some already exist", func(t *testing.T) { @@ -259,9 +259,9 @@ func Test_createNewOSDsFromStatus(t *testing.T) { assert.ElementsMatch(t, createCallsOnPVC, []int{5, 7}) assert.Len(t, createCallsOnNode, 0) // status map should have been marked completed - assert.Equal(t, 4, awaitingStatusConfigMaps.Count()) - assert.Equal(t, 1, createConfig.finishedStatusConfigMaps.Count()) - assert.True(t, createConfig.finishedStatusConfigMaps.Contains(statusNamePVC1)) + assert.Equal(t, 4, awaitingStatusConfigMaps.Len()) + assert.Equal(t, 1, createConfig.finishedStatusConfigMaps.Len()) + assert.True(t, createConfig.finishedStatusConfigMaps.Has(statusNamePVC1)) }) t.Run("pvc: skip creating OSDs for status configmaps that weren't created for this reconcile", func(t *testing.T) { @@ -277,8 +277,8 @@ func Test_createNewOSDsFromStatus(t *testing.T) { assert.ElementsMatch(t, createCallsOnPVC, []int{}) assert.Len(t, createCallsOnNode, 0) // no status maps should have been marked completed - assert.Equal(t, 4, awaitingStatusConfigMaps.Count()) - assert.Equal(t, 0, createConfig.finishedStatusConfigMaps.Count()) + assert.Equal(t, 4, awaitingStatusConfigMaps.Len()) + assert.Equal(t, 0, createConfig.finishedStatusConfigMaps.Len()) }) t.Run("pvc: errors reported if OSDs fail to create", func(t *testing.T) { @@ -295,9 +295,9 @@ func Test_createNewOSDsFromStatus(t *testing.T) { assert.ElementsMatch(t, createCallsOnPVC, []int{0, 1, 2}) assert.Len(t, createCallsOnNode, 0) // status map should have been marked completed - assert.Equal(t, 4, awaitingStatusConfigMaps.Count()) - assert.Equal(t, 1, createConfig.finishedStatusConfigMaps.Count()) - assert.True(t, createConfig.finishedStatusConfigMaps.Contains(statusNamePVC1)) + assert.Equal(t, 4, awaitingStatusConfigMaps.Len()) + assert.Equal(t, 1, createConfig.finishedStatusConfigMaps.Len()) + assert.True(t, createConfig.finishedStatusConfigMaps.Has(statusNamePVC1)) induceFailureCreatingOSD = -1 // off }) } @@ -323,7 +323,7 @@ func Test_startProvisioningOverPVCs(t *testing.T) { var errs *provisionErrors var c *Cluster var config *provisionConfig - var awaitingStatusConfigMaps *util.Set + var awaitingStatusConfigMaps sets.String var err error doSetup := func() { test.SetFakeKubernetesVersion(clientset, fakeK8sVersion) // PVCs require k8s version v1.13+ @@ -341,7 +341,7 @@ func Test_startProvisioningOverPVCs(t *testing.T) { doSetup() awaitingStatusConfigMaps, err = c.startProvisioningOverPVCs(config, errs) assert.NoError(t, err) - assert.Zero(t, awaitingStatusConfigMaps.Count()) + assert.Zero(t, awaitingStatusConfigMaps.Len()) assert.Zero(t, errs.len()) // no result configmaps should have been created cms, err := clientset.CoreV1().ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{}) @@ -366,7 +366,7 @@ func Test_startProvisioningOverPVCs(t *testing.T) { doSetup() awaitingStatusConfigMaps, err = c.startProvisioningOverPVCs(config, errs) assert.NoError(t, err) - assert.Zero(t, awaitingStatusConfigMaps.Count()) + assert.Zero(t, awaitingStatusConfigMaps.Len()) assert.Zero(t, errs.len()) // this was not a problem with a single job but with ALL jobs // no result configmaps should have been created cms, err := clientset.CoreV1().ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{}) @@ -391,7 +391,7 @@ func Test_startProvisioningOverPVCs(t *testing.T) { doSetup() awaitingStatusConfigMaps, err = c.startProvisioningOverPVCs(config, errs) assert.NoError(t, err) - assert.Equal(t, 2, awaitingStatusConfigMaps.Count()) 
+ assert.Equal(t, 2, awaitingStatusConfigMaps.Len()) assert.Zero(t, errs.len()) cms, err := clientset.CoreV1().ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{}) assert.NoError(t, err) @@ -403,7 +403,7 @@ func Test_startProvisioningOverPVCs(t *testing.T) { doSetup() awaitingStatusConfigMaps, err = c.startProvisioningOverPVCs(config, errs) assert.NoError(t, err) - assert.Equal(t, 2, awaitingStatusConfigMaps.Count()) + assert.Equal(t, 2, awaitingStatusConfigMaps.Len()) assert.Zero(t, errs.len()) cms, err := clientset.CoreV1().ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{}) assert.NoError(t, err) @@ -417,7 +417,7 @@ func Test_startProvisioningOverPVCs(t *testing.T) { doSetup() awaitingStatusConfigMaps, err = c.startProvisioningOverPVCs(config, errs) assert.NoError(t, err) - assert.Equal(t, 0, awaitingStatusConfigMaps.Count()) + assert.Equal(t, 0, awaitingStatusConfigMaps.Len()) assert.Equal(t, 1, errs.len()) cms, err := clientset.CoreV1().ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{}) assert.NoError(t, err) @@ -433,7 +433,7 @@ func Test_startProvisioningOverPVCs(t *testing.T) { awaitingStatusConfigMaps, err = c.startProvisioningOverPVCs(config, errs) assert.Error(t, err) assert.Zero(t, errs.len()) - assert.Zero(t, awaitingStatusConfigMaps.Count()) + assert.Zero(t, awaitingStatusConfigMaps.Len()) requestCancelOrchestration.UnSet() }) @@ -453,7 +453,7 @@ func Test_startProvisioningOverPVCs(t *testing.T) { doSetup() awaitingStatusConfigMaps, err = c.startProvisioningOverPVCs(config, errs) assert.NoError(t, err) - assert.Equal(t, 0, awaitingStatusConfigMaps.Count()) + assert.Equal(t, 0, awaitingStatusConfigMaps.Len()) assert.Equal(t, 1, errs.len()) cms, err := clientset.CoreV1().ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{}) assert.NoError(t, err) @@ -490,7 +490,7 @@ func Test_startProvisioningOverNodes(t *testing.T) { var errs *provisionErrors var c *Cluster var config *provisionConfig - var prepareJobsRun *util.Set + var prepareJobsRun sets.String var err error var cms *corev1.ConfigMapList doSetup := func() { @@ -508,7 +508,7 @@ func Test_startProvisioningOverNodes(t *testing.T) { doSetup() prepareJobsRun, err = c.startProvisioningOverNodes(config, errs) assert.NoError(t, err) - assert.Zero(t, prepareJobsRun.Count()) + assert.Zero(t, prepareJobsRun.Len()) assert.Zero(t, errs.len()) // no result configmaps should have been created cms, err = clientset.CoreV1().ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{}) @@ -532,7 +532,7 @@ func Test_startProvisioningOverNodes(t *testing.T) { doSetup() prepareJobsRun, err = c.startProvisioningOverNodes(config, errs) assert.NoError(t, err) - assert.Zero(t, prepareJobsRun.Count()) + assert.Zero(t, prepareJobsRun.Len()) assert.Equal(t, 1, errs.len()) // this was not a problem with a single job but with ALL jobs // no result configmaps should have been created cms, err = clientset.CoreV1().ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{}) @@ -549,7 +549,7 @@ func Test_startProvisioningOverNodes(t *testing.T) { assert.Zero(t, errs.len()) assert.ElementsMatch(t, []string{statusNameNode0, statusNameNode1, statusNameNode2}, - prepareJobsRun.ToSlice(), + prepareJobsRun.List(), ) // all result configmaps should have been created cms, err = clientset.CoreV1().ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{}) @@ -568,7 +568,7 @@ func Test_startProvisioningOverNodes(t *testing.T) { assert.Zero(t, errs.len()) assert.ElementsMatch(t, []string{statusNameNode0, 
statusNameNode1, statusNameNode2}, - prepareJobsRun.ToSlice(), + prepareJobsRun.List(), ) }) @@ -595,7 +595,7 @@ func Test_startProvisioningOverNodes(t *testing.T) { assert.Zero(t, errs.len()) assert.ElementsMatch(t, []string{statusNameNode0, statusNameNode2}, - prepareJobsRun.ToSlice(), + prepareJobsRun.List(), ) }) @@ -605,7 +605,7 @@ func Test_startProvisioningOverNodes(t *testing.T) { prepareJobsRun, err = c.startProvisioningOverNodes(config, errs) assert.Error(t, err) assert.Zero(t, errs.len()) - assert.Zero(t, prepareJobsRun.Count()) + assert.Zero(t, prepareJobsRun.Len()) requestCancelOrchestration.UnSet() }) @@ -626,7 +626,7 @@ func Test_startProvisioningOverNodes(t *testing.T) { prepareJobsRun, err = c.startProvisioningOverNodes(config, errs) assert.NoError(t, err) assert.Zero(t, errs.len()) - assert.Zero(t, prepareJobsRun.Count()) + assert.Zero(t, prepareJobsRun.Len()) }) t.Run("failures running prepare jobs", func(t *testing.T) { @@ -671,13 +671,13 @@ func Test_startProvisioningOverNodes(t *testing.T) { assert.Equal(t, 1, errs.len()) assert.ElementsMatch(t, []string{statusNameNode0}, - prepareJobsRun.ToSlice(), + prepareJobsRun.List(), ) // with a fresh clientset, only the one results ConfigMap should exist cms, err = clientset.CoreV1().ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{}) assert.NoError(t, err) assert.Len(t, cms.Items, 1) - assert.Equal(t, prepareJobsRun.ToSlice()[0], cms.Items[0].Name) + assert.Equal(t, prepareJobsRun.List()[0], cms.Items[0].Name) }) } diff --git a/pkg/operator/ceph/cluster/osd/deviceSet.go b/pkg/operator/ceph/cluster/osd/deviceSet.go index 30fac5d46775..3081ecf1a5c2 100644 --- a/pkg/operator/ceph/cluster/osd/deviceSet.go +++ b/pkg/operator/ceph/cluster/osd/deviceSet.go @@ -27,9 +27,9 @@ import ( "github.com/rook/rook/pkg/clusterd" "github.com/rook/rook/pkg/operator/ceph/controller" "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/util" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" ) // deviceSet is the processed version of the StorageClassDeviceSet @@ -91,8 +91,8 @@ func (c *Cluster) prepareStorageClassDeviceSets(errs *provisionErrors) { highestExistingID := -1 countInDeviceSet := 0 if existingIDs, ok := uniqueOSDsPerDeviceSet[deviceSet.Name]; ok { - logger.Infof("verifying PVCs exist for %d OSDs in device set %q", existingIDs.Count(), deviceSet.Name) - for existingID := range existingIDs.Iter() { + logger.Infof("verifying PVCs exist for %d OSDs in device set %q", existingIDs.Len(), deviceSet.Name) + for existingID := range existingIDs { pvcID, err := strconv.Atoi(existingID) if err != nil { errs.addError("invalid PVC index %q found for device set %q", existingID, deviceSet.Name) @@ -105,7 +105,7 @@ func (c *Cluster) prepareStorageClassDeviceSets(errs *provisionErrors) { deviceSet := c.createDeviceSetPVCsForIndex(deviceSet, existingPVCs, pvcID, errs) c.deviceSets = append(c.deviceSets, deviceSet) } - countInDeviceSet = existingIDs.Count() + countInDeviceSet = existingIDs.Len() } // Create new PVCs if we are not yet at the expected count // No new PVCs will be created if we have too many @@ -130,17 +130,17 @@ func (c *Cluster) createDeviceSetPVCsForIndex(newDeviceSet cephv1.StorageClassDe var crushDeviceClass string var crushInitialWeight string var crushPrimaryAffinity string - typesFound := util.NewSet() + typesFound := sets.NewString() for _, pvcTemplate := range newDeviceSet.VolumeClaimTemplates { if pvcTemplate.Name == "" { // For backward 
compatibility a blank name must be treated as a data volume pvcTemplate.Name = bluestorePVCData } - if typesFound.Contains(pvcTemplate.Name) { + if typesFound.Has(pvcTemplate.Name) { errs.addError("found duplicate volume claim template %q for device set %q", pvcTemplate.Name, newDeviceSet.Name) continue } - typesFound.Add(pvcTemplate.Name) + typesFound.Insert(pvcTemplate.Name) pvc, err := c.createDeviceSetPVC(existingPVCs, newDeviceSet.Name, pvcTemplate, setIndex) if err != nil { @@ -247,7 +247,7 @@ func makeDeviceSetPVC(deviceSetName, pvcID string, setIndex int, pvcTemplate v1. } // GetExistingPVCs fetches the list of OSD PVCs -func GetExistingPVCs(clusterdContext *clusterd.Context, namespace string) (map[string]*v1.PersistentVolumeClaim, map[string]*util.Set, error) { +func GetExistingPVCs(clusterdContext *clusterd.Context, namespace string) (map[string]*v1.PersistentVolumeClaim, map[string]sets.String, error) { ctx := context.TODO() selector := metav1.ListOptions{LabelSelector: CephDeviceSetPVCIDLabelKey} pvcs, err := clusterdContext.Clientset.CoreV1().PersistentVolumeClaims(namespace).List(ctx, selector) @@ -255,7 +255,7 @@ func GetExistingPVCs(clusterdContext *clusterd.Context, namespace string) (map[s return nil, nil, errors.Wrap(err, "failed to detect PVCs") } result := map[string]*v1.PersistentVolumeClaim{} - uniqueOSDsPerDeviceSet := map[string]*util.Set{} + uniqueOSDsPerDeviceSet := map[string]sets.String{} for i, pvc := range pvcs.Items { // Populate the PVCs based on their unique name across all the device sets pvcID := pvc.Labels[CephDeviceSetPVCIDLabelKey] @@ -265,9 +265,9 @@ func GetExistingPVCs(clusterdContext *clusterd.Context, namespace string) (map[s deviceSet := pvc.Labels[CephDeviceSetLabelKey] pvcIndex := pvc.Labels[CephSetIndexLabelKey] if _, ok := uniqueOSDsPerDeviceSet[deviceSet]; !ok { - uniqueOSDsPerDeviceSet[deviceSet] = util.NewSet() + uniqueOSDsPerDeviceSet[deviceSet] = sets.NewString() } - uniqueOSDsPerDeviceSet[deviceSet].Add(pvcIndex) + uniqueOSDsPerDeviceSet[deviceSet].Insert(pvcIndex) } return result, uniqueOSDsPerDeviceSet, nil diff --git a/pkg/operator/ceph/cluster/osd/osd.go b/pkg/operator/ceph/cluster/osd/osd.go index c131d51ab9f0..50cc160e399e 100644 --- a/pkg/operator/ceph/cluster/osd/osd.go +++ b/pkg/operator/ceph/cluster/osd/osd.go @@ -37,11 +37,11 @@ import ( "github.com/rook/rook/pkg/operator/ceph/controller" cephver "github.com/rook/rook/pkg/operator/ceph/version" "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/util" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" ) var ( @@ -199,21 +199,21 @@ func (c *Cluster) Start() error { updateConfig := c.newUpdateConfig(config, updateQueue, deployments) // prepare for creating new OSDs - statusConfigMaps := util.NewSet() + statusConfigMaps := sets.NewString() logger.Info("start provisioning the OSDs on PVCs, if needed") pvcConfigMaps, err := c.startProvisioningOverPVCs(config, errs) if err != nil { return err } - statusConfigMaps.AddSet(pvcConfigMaps) + statusConfigMaps = statusConfigMaps.Union(pvcConfigMaps) logger.Info("start provisioning the OSDs on nodes, if needed") nodeConfigMaps, err := c.startProvisioningOverNodes(config, errs) if err != nil { return err } - statusConfigMaps.AddSet(nodeConfigMaps.Copy()) + statusConfigMaps = statusConfigMaps.Union(nodeConfigMaps) createConfig := c.newCreateConfig(config, statusConfigMaps, deployments) @@ 
-239,7 +239,7 @@ func (c *Cluster) Start() error { return nil } -func (c *Cluster) getExistingOSDDeploymentsOnPVCs() (*util.Set, error) { +func (c *Cluster) getExistingOSDDeploymentsOnPVCs() (sets.String, error) { ctx := context.TODO() listOpts := metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s,%s", k8sutil.AppAttr, AppName, OSDOverPVCLabelKey)} @@ -248,10 +248,10 @@ func (c *Cluster) getExistingOSDDeploymentsOnPVCs() (*util.Set, error) { return nil, errors.Wrap(err, "failed to query existing OSD deployments") } - result := util.NewSet() + result := sets.NewString() for _, deployment := range deployments.Items { if pvcID, ok := deployment.Labels[OSDOverPVCLabelKey]; ok { - result.Add(pvcID) + result.Insert(pvcID) } } diff --git a/pkg/operator/ceph/cluster/osd/osd_test.go b/pkg/operator/ceph/cluster/osd/osd_test.go index 68e42e604254..9967ae4fd0f1 100644 --- a/pkg/operator/ceph/cluster/osd/osd_test.go +++ b/pkg/operator/ceph/cluster/osd/osd_test.go @@ -493,7 +493,7 @@ func TestGetOSDInfo(t *testing.T) { }) } -func TestOSDPlacement(t *testing.T) { +func TestGetPreparePlacement(t *testing.T) { // no placement prop := osdProperties{} result := prop.getPreparePlacement() diff --git a/pkg/operator/ceph/cluster/osd/provision_spec.go b/pkg/operator/ceph/cluster/osd/provision_spec.go index e1f0f50ae58d..396c1adfb322 100644 --- a/pkg/operator/ceph/cluster/osd/provision_spec.go +++ b/pkg/operator/ceph/cluster/osd/provision_spec.go @@ -157,17 +157,13 @@ func (c *Cluster) provisionPodTemplateSpec(osdProps osdProperties, restart v1.Re podSpec.DNSPolicy = v1.DNSClusterFirstWithHostNet } if osdProps.onPVC() { - // The "all" placement is applied separately so it will have lower priority. - // We want placement from the storageClassDeviceSet to be applied and override - // the "all" placement if there are any overlapping placement settings. - c.spec.Placement.All().ApplyToPodSpec(&podSpec) - // Apply storageClassDeviceSet PreparePlacement - // If nodeAffinity is specified both in the device set and "all" placement, - // they will be merged. + c.applyAllPlacementIfNeeded(&podSpec) + // apply storageClassDeviceSets.preparePlacement osdProps.getPreparePlacement().ApplyToPodSpec(&podSpec) } else { - p := cephv1.GetOSDPlacement(c.spec.Placement) - p.ApplyToPodSpec(&podSpec) + c.applyAllPlacementIfNeeded(&podSpec) + // apply spec.placement.prepareosd + c.spec.Placement[cephv1.KeyOSDPrepare].ApplyToPodSpec(&podSpec) } k8sutil.RemoveDuplicateEnvVars(&podSpec) diff --git a/pkg/operator/ceph/cluster/osd/spec.go b/pkg/operator/ceph/cluster/osd/spec.go index 594626561299..bbda36b6c138 100644 --- a/pkg/operator/ceph/cluster/osd/spec.go +++ b/pkg/operator/ceph/cluster/osd/spec.go @@ -715,17 +715,13 @@ func (c *Cluster) makeDeployment(osdProps osdProperties, osd OSDInfo, provisionC } if osdProps.onPVC() { - // the "all" placement is applied separately so it will have lower priority. - // We want placement from the storageClassDeviceSet to be applied and override - // the "all" placement if there are any overlapping placement settings. - c.spec.Placement.All().ApplyToPodSpec(&deployment.Spec.Template.Spec) - // apply storageClassDeviceSet Placement - // If nodeAffinity is specified both in the device set and "all" placement, - // they will be merged. 
+		c.applyAllPlacementIfNeeded(&deployment.Spec.Template.Spec)
+		// apply storageClassDeviceSets.Placement
 		osdProps.placement.ApplyToPodSpec(&deployment.Spec.Template.Spec)
 	} else {
-		p := cephv1.GetOSDPlacement(c.spec.Placement)
-		p.ApplyToPodSpec(&deployment.Spec.Template.Spec)
+		c.applyAllPlacementIfNeeded(&deployment.Spec.Template.Spec)
+		// apply c.spec.Placement.osd
+		c.spec.Placement[cephv1.KeyOSD].ApplyToPodSpec(&deployment.Spec.Template.Spec)
 	}
 
 	// portable OSDs must have affinity to the topology where the osd prepare job was executed
@@ -745,6 +741,22 @@ func (c *Cluster) makeDeployment(osdProps osdProperties, osd OSDInfo, provisionC
 	return deployment, nil
 }
 
+// applyAllPlacementIfNeeded applies spec.placement.all if OnlyApplyOSDPlacement is set to false
+func (c *Cluster) applyAllPlacementIfNeeded(d *v1.PodSpec) {
+	// The placement for OSDs is computed from several places:
+	// - For non-PVCs: `placement.all` and `placement.osd`
+	// - For PVCs: `placement.all` plus the `placement` or `preparePlacement` from the storageClassDeviceSet
+
+	// By default (onlyApplyOSDPlacement is false), the placement from these sources is merged for NodeAffinity and tolerations.
+	// For other placement rules such as PodAffinity and PodAntiAffinity, the placement applied last overrides the previous one.
+	// See ApplyToPodSpec().
+
+	// apply spec.placement.all when spec.Storage.OnlyApplyOSDPlacement is false
+	if !c.spec.Storage.OnlyApplyOSDPlacement {
+		c.spec.Placement.All().ApplyToPodSpec(d)
+	}
+}
+
 func applyTopologyAffinity(spec *v1.PodSpec, osd OSDInfo) error {
 	if osd.TopologyAffinity == "" {
 		logger.Debugf("no topology affinity to set for osd %d", osd.ID)
@@ -844,6 +856,22 @@ func (c *Cluster) getActivateOSDInitContainer(configDir, namespace, osdID string
 	return volume, container
 }
 
+// The blockdevmapper container copies the device node file, which is regarded as a device special file.
+// To be able to perform this action, the CAP_MKNOD capability is required.
+// Provide a securityContext which requests the MKNOD capability so the container can function properly.
+func getBlockDevMapperContext() *v1.SecurityContext {
+	privileged := controller.HostPathRequiresPrivileged()
+
+	return &v1.SecurityContext{
+		Capabilities: &v1.Capabilities{
+			Add: []v1.Capability{
+				"MKNOD",
+			},
+		},
+		Privileged: &privileged,
+	}
+}
+
 // Currently we can't mount a block mode pv directly to a privileged container
 // So we mount it to a non privileged init container and then copy it to a common directory mounted inside init container
 // and the privileged provision container.
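// Reviewer note (not part of the patch): the OSD hunks in this section
// (create.go, osd.go, deviceSet.go, and watcher.go below) replace Rook's
// internal util.Set with sets.String from k8s.io/apimachinery. A minimal,
// self-contained sketch of the replacement calls used by those hunks follows;
// the status configmap names are placeholders, not the names Rook generates.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Status configmaps awaiting OSD prepare results, as tracked by createConfig.
	awaiting := sets.NewString("node0-status", "pvc1-status") // was util.NewSet() + AddMultiple()
	finished := sets.NewString()

	finished.Insert("node0-status")             // was Add()
	fmt.Println(awaiting.Has("node0-status"))   // was Contains(); prints true
	fmt.Println(awaiting.Len(), finished.Len()) // was Count(); prints 2 1

	all := awaiting.Union(finished) // was AddSet(); returns a new set
	fmt.Println(all.List())         // was ToSlice(); List() returns a sorted slice
}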
@@ -863,7 +891,7 @@ func (c *Cluster) getPVCInitContainer(osdProps osdProperties) v1.Container { }, }, VolumeMounts: []v1.VolumeMount{getPvcOSDBridgeMount(osdProps.pvc.ClaimName)}, - SecurityContext: controller.PodSecurityContext(), + SecurityContext: getBlockDevMapperContext(), Resources: osdProps.resources, } } @@ -895,7 +923,7 @@ func (c *Cluster) getPVCInitContainerActivate(mountPath string, osdProps osdProp }, }, VolumeMounts: []v1.VolumeMount{getPvcOSDBridgeMountActivate(mountPath, osdProps.pvc.ClaimName)}, - SecurityContext: controller.PodSecurityContext(), + SecurityContext: getBlockDevMapperContext(), Resources: osdProps.resources, } } @@ -997,7 +1025,7 @@ func (c *Cluster) generateEncryptionCopyBlockContainer(resources v1.ResourceRequ // volumeMountPVCName is crucial, especially when the block we copy is the metadata block // its value must be the name of the block PV so that all init containers use the same bridge (the emptyDir shared by all the init containers) VolumeMounts: []v1.VolumeMount{getPvcOSDBridgeMountActivate(mountPath, volumeMountPVCName), getDeviceMapperMount()}, - SecurityContext: controller.PodSecurityContext(), + SecurityContext: getBlockDevMapperContext(), Resources: resources, } } @@ -1044,7 +1072,7 @@ func (c *Cluster) getPVCMetadataInitContainer(mountPath string, osdProps osdProp Name: fmt.Sprintf("%s-bridge", osdProps.metadataPVC.ClaimName), }, }, - SecurityContext: controller.PodSecurityContext(), + SecurityContext: getBlockDevMapperContext(), Resources: osdProps.resources, } } @@ -1078,7 +1106,7 @@ func (c *Cluster) getPVCMetadataInitContainerActivate(mountPath string, osdProps // We need to call getPvcOSDBridgeMountActivate() so that we can copy the metadata block into the "main" empty dir // This empty dir is passed along every init container VolumeMounts: []v1.VolumeMount{getPvcOSDBridgeMountActivate(mountPath, osdProps.pvc.ClaimName)}, - SecurityContext: controller.PodSecurityContext(), + SecurityContext: getBlockDevMapperContext(), Resources: osdProps.resources, } } @@ -1104,7 +1132,7 @@ func (c *Cluster) getPVCWalInitContainer(mountPath string, osdProps osdPropertie Name: fmt.Sprintf("%s-bridge", osdProps.walPVC.ClaimName), }, }, - SecurityContext: controller.PodSecurityContext(), + SecurityContext: getBlockDevMapperContext(), Resources: osdProps.resources, } } @@ -1138,7 +1166,7 @@ func (c *Cluster) getPVCWalInitContainerActivate(mountPath string, osdProps osdP // We need to call getPvcOSDBridgeMountActivate() so that we can copy the wal block into the "main" empty dir // This empty dir is passed along every init container VolumeMounts: []v1.VolumeMount{getPvcOSDBridgeMountActivate(mountPath, osdProps.pvc.ClaimName)}, - SecurityContext: controller.PodSecurityContext(), + SecurityContext: getBlockDevMapperContext(), Resources: osdProps.resources, } } diff --git a/pkg/operator/ceph/cluster/osd/spec_test.go b/pkg/operator/ceph/cluster/osd/spec_test.go index e24085e57988..030668514bd7 100644 --- a/pkg/operator/ceph/cluster/osd/spec_test.go +++ b/pkg/operator/ceph/cluster/osd/spec_test.go @@ -32,6 +32,7 @@ import ( exectest "github.com/rook/rook/pkg/util/exec/test" "github.com/stretchr/testify/assert" appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/client-go/kubernetes/fake" @@ -707,3 +708,164 @@ func getDummyDeploymentOnNode(clientset *fake.Clientset, c *Cluster, nodeName st } return d } + +func TestOSDPlacement(t *testing.T) { + clientset := fake.NewSimpleClientset() + 
clusterInfo := &cephclient.ClusterInfo{ + Namespace: "ns", + CephVersion: cephver.Nautilus, + } + clusterInfo.SetName("testing") + clusterInfo.OwnerInfo = cephclient.NewMinimumOwnerInfo(t) + context := &clusterd.Context{Clientset: clientset, ConfigDir: "/var/lib/rook", Executor: &exectest.MockExecutor{}} + + spec := cephv1.ClusterSpec{ + Placement: cephv1.PlacementSpec{ + "all": { + NodeAffinity: &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{{ + Key: "role", + Operator: v1.NodeSelectorOpIn, + Values: []string{"storage-node1"}, + }}, + }, + }, + }, + }, + }, + "osd": { + NodeAffinity: &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{{ + Key: "role", + Operator: v1.NodeSelectorOpIn, + Values: []string{"storage-node1"}, + }}, + }, + }, + }, + }, + }, + "prepareosd": { + NodeAffinity: &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{{ + Key: "role", + Operator: v1.NodeSelectorOpIn, + Values: []string{"storage-node1"}, + }}, + }, + }, + }, + }, + }, + }, + Storage: cephv1.StorageScopeSpec{ + OnlyApplyOSDPlacement: false, + }, + } + + osdProps := osdProperties{ + pvc: v1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pvc1", + }, + } + osdProps.placement = cephv1.Placement{NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: "role", + Operator: v1.NodeSelectorOpIn, + Values: []string{"storage-node3"}, + }, + }, + }, + }, + }, + }, + } + + osdProps.preparePlacement = &cephv1.Placement{NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: "role", + Operator: v1.NodeSelectorOpIn, + Values: []string{"storage-node3"}, + }, + }, + }, + }, + }, + }, + } + + c := New(context, clusterInfo, spec, "rook/rook:myversion") + osd := OSDInfo{ + ID: 0, + CVMode: "raw", + } + + dataPathMap := &provisionConfig{ + DataPathMap: opconfig.NewDatalessDaemonDataPathMap(c.clusterInfo.Namespace, "/var/lib/rook"), + } + + // For OSD daemon + // When OnlyApplyOSDPlacement false, in case of PVC + r, err := c.makeDeployment(osdProps, osd, dataPathMap) + assert.NoError(t, err) + assert.Equal(t, 2, len(r.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions)) + + // For OSD-prepare job + job, err := c.makeJob(osdProps, dataPathMap) + assert.NoError(t, err) + assert.Equal(t, 2, len(job.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions)) + + // When OnlyApplyOSDPlacement true, in case of PVC + spec.Storage.OnlyApplyOSDPlacement = true + c = New(context, clusterInfo, spec, "rook/rook:myversion") + r, err = c.makeDeployment(osdProps, osd, dataPathMap) + assert.NoError(t, err) + assert.Equal(t, 1, len(r.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions)) + + // For OSD-prepare 
job + job, err = c.makeJob(osdProps, dataPathMap) + assert.NoError(t, err) + assert.Equal(t, 1, len(job.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions)) + + // When OnlyApplyOSDPlacement false, in case of non-PVC + spec.Storage.OnlyApplyOSDPlacement = false + osdProps = osdProperties{} + c = New(context, clusterInfo, spec, "rook/rook:myversion") + r, err = c.makeDeployment(osdProps, osd, dataPathMap) + assert.NoError(t, err) + assert.Equal(t, 2, len(r.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions)) + + // For OSD-prepare job + job, err = c.makeJob(osdProps, dataPathMap) + assert.NoError(t, err) + assert.Equal(t, 2, len(job.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions)) + + // When OnlyApplyOSDPlacement true, in case of non-PVC + spec.Storage.OnlyApplyOSDPlacement = true + c = New(context, clusterInfo, spec, "rook/rook:myversion") + r, err = c.makeDeployment(osdProps, osd, dataPathMap) + assert.NoError(t, err) + assert.Equal(t, 1, len(r.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions)) + + // For OSD-prepare job + job, err = c.makeJob(osdProps, dataPathMap) + assert.NoError(t, err) + assert.Equal(t, 1, len(job.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions)) +} diff --git a/pkg/operator/ceph/cluster/osd/status.go b/pkg/operator/ceph/cluster/osd/status.go index 9943c7d0b517..e1fc4ea88d84 100644 --- a/pkg/operator/ceph/cluster/osd/status.go +++ b/pkg/operator/ceph/cluster/osd/status.go @@ -99,7 +99,7 @@ func (e *provisionErrors) asMessages() string { // return name of status ConfigMap func (c *Cluster) updateOSDStatus(node string, status OrchestrationStatus) string { - return UpdateNodeStatus(c.kv, node, status) + return UpdateNodeOrPVCStatus(c.kv, node, status) } func statusConfigMapLabels(node string) map[string]string { @@ -110,14 +110,14 @@ func statusConfigMapLabels(node string) map[string]string { } } -// UpdateNodeStatus updates the status ConfigMap for the OSD on the given node. It returns the name +// UpdateNodeOrPVCStatus updates the status ConfigMap for the OSD on the given node or PVC. It returns the name // the ConfigMap used. -func UpdateNodeStatus(kv *k8sutil.ConfigMapKVStore, node string, status OrchestrationStatus) string { - labels := statusConfigMapLabels(node) +func UpdateNodeOrPVCStatus(kv *k8sutil.ConfigMapKVStore, nodeOrPVC string, status OrchestrationStatus) string { + labels := statusConfigMapLabels(nodeOrPVC) // update the status map with the given status now s, _ := json.Marshal(status) - cmName := statusConfigMapName(node) + cmName := statusConfigMapName(nodeOrPVC) if err := kv.SetValueWithLabels( cmName, orchestrationStatusKey, @@ -125,7 +125,7 @@ func UpdateNodeStatus(kv *k8sutil.ConfigMapKVStore, node string, status Orchestr labels, ); err != nil { // log the error, but allow the orchestration to continue even if the status update failed - logger.Errorf("failed to set node %q status to %q for osd orchestration. %s", node, status.Status, status.Message) + logger.Errorf("failed to set node or PVC %q status to %q for osd orchestration. 
%s", nodeOrPVC, status.Status, status.Message) } return cmName } @@ -133,7 +133,7 @@ func UpdateNodeStatus(kv *k8sutil.ConfigMapKVStore, node string, status Orchestr func (c *Cluster) handleOrchestrationFailure(errors *provisionErrors, nodeName, message string, args ...interface{}) { errors.addError(message, args...) status := OrchestrationStatus{Status: OrchestrationStatusFailed, Message: message} - UpdateNodeStatus(c.kv, nodeName, status) + UpdateNodeOrPVCStatus(c.kv, nodeName, status) } func parseOrchestrationStatus(data map[string]string) *OrchestrationStatus { diff --git a/pkg/operator/ceph/cluster/osd/status_test.go b/pkg/operator/ceph/cluster/osd/status_test.go index 3db934e911ee..2b3eafe6f910 100644 --- a/pkg/operator/ceph/cluster/osd/status_test.go +++ b/pkg/operator/ceph/cluster/osd/status_test.go @@ -56,7 +56,7 @@ func TestOrchestrationStatus(t *testing.T) { // update the status map with some status status := OrchestrationStatus{Status: OrchestrationStatusOrchestrating, Message: "doing work"} - UpdateNodeStatus(kv, nodeName, status) + UpdateNodeOrPVCStatus(kv, nodeName, status) // retrieve the status and verify it statusMap, err := c.context.Clientset.CoreV1().ConfigMaps(c.clusterInfo.Namespace).Get(ctx, cmName, metav1.GetOptions{}) @@ -94,7 +94,7 @@ func mockNodeOrchestrationCompletion(c *Cluster, nodeName string, statusMapWatch }, Status: OrchestrationStatusCompleted, } - UpdateNodeStatus(c.kv, nodeName, *status) + UpdateNodeOrPVCStatus(c.kv, nodeName, *status) // 2) call modify on the fake watcher so a watch event will get triggered s, _ := json.Marshal(status) diff --git a/pkg/operator/ceph/cluster/rbd/config.go b/pkg/operator/ceph/cluster/rbd/config.go index ed20f4deda48..03a8296ae920 100644 --- a/pkg/operator/ceph/cluster/rbd/config.go +++ b/pkg/operator/ceph/cluster/rbd/config.go @@ -83,6 +83,8 @@ func (r *ReconcileCephRBDMirror) reconcileAddBoostrapPeer(cephRBDMirror *cephv1. ctx := context.TODO() // List all the peers secret, we can have more than one peer we might want to configure // For each, get the Kubernetes Secret and import the "peer token" so that we can configure the mirroring + + logger.Warning("(DEPRECATED) use of peer secret names in CephRBDMirror is deprecated. 
Please use CephBlockPool CR to configure peer secret names and import peers.") for _, peerSecret := range cephRBDMirror.Spec.Peers.SecretNames { logger.Debugf("fetching bootstrap peer kubernetes secret %q", peerSecret) s, err := r.context.Clientset.CoreV1().Secrets(r.clusterInfo.Namespace).Get(ctx, peerSecret, metav1.GetOptions{}) diff --git a/pkg/operator/ceph/cluster/rbd/controller.go b/pkg/operator/ceph/cluster/rbd/controller.go index a2ef91d51c6b..2a220bc09a77 100644 --- a/pkg/operator/ceph/cluster/rbd/controller.go +++ b/pkg/operator/ceph/cluster/rbd/controller.go @@ -191,7 +191,7 @@ func (r *ReconcileCephRBDMirror) reconcile(request reconcile.Request) (reconcile } // Make sure a CephCluster is present otherwise do nothing - cephCluster, isReadyToReconcile, _, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName) + cephCluster, isReadyToReconcile, _, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, request.NamespacedName, controllerName) if !isReadyToReconcile { logger.Debugf("CephCluster resource not ready in namespace %q, retrying in %q.", request.NamespacedName.Namespace, reconcileResponse.RequeueAfter.String()) return reconcileResponse, nil diff --git a/pkg/operator/ceph/cluster/version.go b/pkg/operator/ceph/cluster/version.go index 8e436d888e3c..b4df66b345a5 100644 --- a/pkg/operator/ceph/cluster/version.go +++ b/pkg/operator/ceph/cluster/version.go @@ -105,7 +105,8 @@ func diffImageSpecAndClusterRunningVersion(imageSpecVersion cephver.CephVersion, } if cephver.IsInferior(imageSpecVersion, clusterRunningVersion) { - return true, errors.Errorf("image spec version %s is lower than the running cluster version %s, downgrading is not supported", imageSpecVersion.String(), clusterRunningVersion.String()) + logger.Warningf("image spec version %s is lower than the running cluster version %s, downgrading is not supported", imageSpecVersion.String(), clusterRunningVersion.String()) + return true, nil } } } diff --git a/pkg/operator/ceph/cluster/version_test.go b/pkg/operator/ceph/cluster/version_test.go index 151e27f2c0fe..8a79a511ba19 100755 --- a/pkg/operator/ceph/cluster/version_test.go +++ b/pkg/operator/ceph/cluster/version_test.go @@ -35,8 +35,8 @@ func TestDiffImageSpecAndClusterRunningVersion(t *testing.T) { fakeRunningVersions := []byte(` { "mon": { - "ceph version 13.2.5 (cbff874f9007f1869bfd3821b7e33b2a6ffd4988) mimic (stable)": 1, - "ceph version 14.2.0 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) nautilus (stable)": 2 + "ceph version 16.2.5 (cbff874f9007f1869bfd3821b7e33b2a6ffd4988) pacific (stable)": 1, + "ceph version 17.2.0 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) quincy (stable)": 2 } }`) var dummyRunningVersions cephv1.CephDaemonsVersions @@ -51,8 +51,8 @@ func TestDiffImageSpecAndClusterRunningVersion(t *testing.T) { fakeRunningVersions = []byte(` { "overall": { - "ceph version 13.2.5 (cbff874f9007f1869bfd3821b7e33b2a6ffd4988) mimic (stable)": 1, - "ceph version 14.2.0 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) nautilus (stable)": 2 + "ceph version 16.2.5 (cbff874f9007f1869bfd3821b7e33b2a6ffd4988) pacific (stable)": 1, + "ceph version 17.2.0 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) quincy (stable)": 2 } }`) var dummyRunningVersions2 cephv1.CephDaemonsVersions @@ -74,16 +74,17 @@ func TestDiffImageSpecAndClusterRunningVersion(t *testing.T) { err = json.Unmarshal([]byte(fakeRunningVersions), &dummyRunningVersions3) assert.NoError(t, err) + // Allow the downgrade m, err = 
diffImageSpecAndClusterRunningVersion(fakeImageVersion, dummyRunningVersions3) - assert.Error(t, err) + assert.NoError(t, err) assert.True(t, m) // 4 test - spec version is higher than running cluster --> we upgrade - fakeImageVersion = cephver.Nautilus + fakeImageVersion = cephver.Pacific fakeRunningVersions = []byte(` { "overall": { - "ceph version 13.2.0 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) mimic (stable)": 2 + "ceph version 15.2.5 (cbff874f9007f1869bfd3821b7e33b2a6ffd4988) octopus (stable)": 2 } }`) var dummyRunningVersions4 cephv1.CephDaemonsVersions @@ -95,12 +96,12 @@ func TestDiffImageSpecAndClusterRunningVersion(t *testing.T) { assert.True(t, m) // 5 test - spec version and running cluster versions are identical --> we upgrade - fakeImageVersion = cephver.CephVersion{Major: 14, Minor: 2, Extra: 2, + fakeImageVersion = cephver.CephVersion{Major: 16, Minor: 2, Extra: 2, CommitID: "3a54b2b6d167d4a2a19e003a705696d4fe619afc"} fakeRunningVersions = []byte(` { "overall": { - "ceph version 14.2.2 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) nautilus (stable)": 2 + "ceph version 16.2.2 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) pacific (stable)": 2 } }`) var dummyRunningVersions5 cephv1.CephDaemonsVersions @@ -112,12 +113,12 @@ func TestDiffImageSpecAndClusterRunningVersion(t *testing.T) { assert.False(t, m) // 6 test - spec version and running cluster have different commit ID - fakeImageVersion = cephver.CephVersion{Major: 14, Minor: 2, Extra: 11, Build: 139, + fakeImageVersion = cephver.CephVersion{Major: 16, Minor: 2, Extra: 11, Build: 139, CommitID: "5c0dc966af809fd1d429ec7bac48962a746af243"} fakeRunningVersions = []byte(` { "overall": { - "ceph version 14.2.11-139.el8cp (3a54b2b6d167d4a2a19e003a705696d4fe619afc) nautilus (stable)": 2 + "ceph version 16.2.11-139.el8cp (3a54b2b6d167d4a2a19e003a705696d4fe619afc) pacific (stable)": 2 } }`) var dummyRunningVersions6 cephv1.CephDaemonsVersions @@ -129,12 +130,12 @@ func TestDiffImageSpecAndClusterRunningVersion(t *testing.T) { assert.True(t, m) // 7 test - spec version and running cluster have same commit ID - fakeImageVersion = cephver.CephVersion{Major: 14, Minor: 2, Extra: 11, Build: 139, + fakeImageVersion = cephver.CephVersion{Major: 16, Minor: 2, Extra: 11, Build: 139, CommitID: "3a54b2b6d167d4a2a19e003a705696d4fe619afc"} fakeRunningVersions = []byte(` { "overall": { - "ceph version 14.2.11-139.el8cp (3a54b2b6d167d4a2a19e003a705696d4fe619afc) nautilus (stable)": 2 + "ceph version 16.2.11-139.el8cp (3a54b2b6d167d4a2a19e003a705696d4fe619afc) pacific (stable)": 2 } }`) var dummyRunningVersions7 cephv1.CephDaemonsVersions diff --git a/pkg/operator/ceph/cluster/watcher.go b/pkg/operator/ceph/cluster/watcher.go index ac7994e6f3ca..9e3bf676c37d 100644 --- a/pkg/operator/ceph/cluster/watcher.go +++ b/pkg/operator/ceph/cluster/watcher.go @@ -19,15 +19,17 @@ package cluster import ( "context" + "strings" cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" "github.com/rook/rook/pkg/clusterd" cephclient "github.com/rook/rook/pkg/daemon/ceph/client" discoverDaemon "github.com/rook/rook/pkg/daemon/discover" + opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/util" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -39,7 +41,7 @@ type clientCluster struct { context *clusterd.Context } -var nodesCheckedForReconcile = util.NewSet() +var nodesCheckedForReconcile = 
sets.NewString() func newClientCluster(client client.Client, namespace string, context *clusterd.Context) *clientCluster { return &clientCluster{ @@ -64,7 +66,7 @@ func (c *clientCluster) onK8sNode(object runtime.Object) bool { return false } // skip reconcile if node is already checked in a previous reconcile - if nodesCheckedForReconcile.Contains(node.Name) { + if nodesCheckedForReconcile.Has(node.Name) { return false } // Get CephCluster @@ -81,7 +83,7 @@ func (c *clientCluster) onK8sNode(object runtime.Object) bool { } if !checkStorageForNode(cluster) { - nodesCheckedForReconcile.Add(node.Name) + nodesCheckedForReconcile.Insert(node.Name) return false } @@ -92,7 +94,7 @@ func (c *clientCluster) onK8sNode(object runtime.Object) bool { } logger.Debugf("node %q is ready, checking if it can run OSDs", node.Name) - nodesCheckedForReconcile.Add(node.Name) + nodesCheckedForReconcile.Insert(node.Name) valid, _ := k8sutil.ValidNode(*node, cephv1.GetOSDPlacement(cluster.Spec.Placement)) if valid { nodeName := node.Name @@ -107,6 +109,10 @@ func (c *clientCluster) onK8sNode(object runtime.Object) bool { clusterInfo := cephclient.AdminClusterInfo(cluster.Namespace) osds, err := cephclient.GetOSDOnHost(c.context, clusterInfo, nodeName) if err != nil { + if strings.Contains(err.Error(), opcontroller.UninitializedCephConfigError) { + logger.Debug(opcontroller.OperatorNotInitializedMessage) + return false + } // If it fails, this might be due to the the operator just starting and catching an add event for that node logger.Debugf("failed to get osds on node %q, assume reconcile is necessary", nodeName) return true diff --git a/pkg/operator/ceph/config/livenessprobe.go b/pkg/operator/ceph/config/livenessprobe.go index 0c6df3019e8d..59802945e81b 100644 --- a/pkg/operator/ceph/config/livenessprobe.go +++ b/pkg/operator/ceph/config/livenessprobe.go @@ -44,7 +44,7 @@ func ConfigureLivenessProbe(daemon rook.KeyType, container v1.Container, healthC // If the spec value is not empty, let's apply it along with default when some fields are not specified if probe != nil { // Set the liveness probe on the container to overwrite the default probe created by Rook - container.LivenessProbe = GetLivenessProbeWithDefaults(probe, container.LivenessProbe) + container.LivenessProbe = GetProbeWithDefaults(probe, container.LivenessProbe) } } } @@ -52,7 +52,7 @@ func ConfigureLivenessProbe(daemon rook.KeyType, container v1.Container, healthC return container } -func GetLivenessProbeWithDefaults(desiredProbe, currentProbe *v1.Probe) *v1.Probe { +func GetProbeWithDefaults(desiredProbe, currentProbe *v1.Probe) *v1.Probe { newProbe := *desiredProbe // Do not replace the handler with the previous one! 
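For context, a minimal usage sketch of the renamed helper (values and call site are illustrative, not taken from this patch): fields set on the probe override win, while the handler of the Rook-generated default probe is kept, which is what the updated test below exercises.

```go
package main

import (
	"fmt"

	"github.com/rook/rook/pkg/operator/ceph/config"
	v1 "k8s.io/api/core/v1"
)

func main() {
	// The default probe Rook would build for a mon container (illustrative).
	defaultProbe := &v1.Probe{
		Handler: v1.Handler{
			Exec: &v1.ExecAction{
				Command: []string{"env", "-i", "sh", "-c", "ceph --admin-daemon /run/ceph/ceph-mon.a.asok mon_status"},
			},
		},
		InitialDelaySeconds: 10,
		TimeoutSeconds:      5,
	}

	// Override supplied through the CR's healthCheck section (illustrative values).
	override := &v1.Probe{InitialDelaySeconds: 1, FailureThreshold: 2}

	merged := config.GetProbeWithDefaults(override, defaultProbe)
	fmt.Println(merged.Exec.Command)        // handler kept from the default probe
	fmt.Println(merged.InitialDelaySeconds) // 1, taken from the override
}
```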
diff --git a/pkg/operator/ceph/config/livenessprobe_test.go b/pkg/operator/ceph/config/livenessprobe_test.go index e2430392e8b9..8e8735ed873f 100644 --- a/pkg/operator/ceph/config/livenessprobe_test.go +++ b/pkg/operator/ceph/config/livenessprobe_test.go @@ -75,7 +75,7 @@ func configLivenessProbeHelper(t *testing.T, keyType rook.KeyType) { } } -func TestGetLivenessProbeWithDefaults(t *testing.T) { +func TestGetProbeWithDefaults(t *testing.T) { t.Run("using default probe", func(t *testing.T) { currentProb := &v1.Probe{ Handler: v1.Handler{ @@ -94,7 +94,7 @@ func TestGetLivenessProbeWithDefaults(t *testing.T) { } // in case of default probe desiredProbe := &v1.Probe{} - desiredProbe = GetLivenessProbeWithDefaults(desiredProbe, currentProb) + desiredProbe = GetProbeWithDefaults(desiredProbe, currentProb) assert.Equal(t, desiredProbe, currentProb) }) @@ -134,7 +134,7 @@ func TestGetLivenessProbeWithDefaults(t *testing.T) { SuccessThreshold: 4, TimeoutSeconds: 5, } - desiredProbe = GetLivenessProbeWithDefaults(desiredProbe, currentProb) + desiredProbe = GetProbeWithDefaults(desiredProbe, currentProb) assert.Equal(t, desiredProbe.Exec.Command, []string{"env", "-i", "sh", "-c", "ceph --admin-daemon /run/ceph/ceph-mon.c.asok mon_status"}) assert.Equal(t, desiredProbe.InitialDelaySeconds, int32(1)) assert.Equal(t, desiredProbe.FailureThreshold, int32(2)) diff --git a/pkg/operator/ceph/config/monstore.go b/pkg/operator/ceph/config/monstore.go index ec12b3152c02..06c562b8ea1c 100644 --- a/pkg/operator/ceph/config/monstore.go +++ b/pkg/operator/ceph/config/monstore.go @@ -23,6 +23,7 @@ import ( "github.com/pkg/errors" "github.com/rook/rook/pkg/clusterd" "github.com/rook/rook/pkg/daemon/ceph/client" + "github.com/rook/rook/pkg/util/exec" ) // MonStore provides methods for setting Ceph configurations in the centralized mon @@ -52,13 +53,29 @@ type Option struct { Value string } +func (m *MonStore) SetIfChanged(who, option, value string) (bool, error) { + currentVal, err := m.Get(who, option) + if err != nil { + return false, errors.Wrapf(err, "failed to get value %q", option) + } + if currentVal == value { + // no need to update the setting + return false, nil + } + + if err := m.Set(who, option, value); err != nil { + return false, errors.Wrapf(err, "failed to set value %s=%s", option, value) + } + return true, nil +} + // Set sets a config in the centralized mon configuration database. // https://docs.ceph.com/docs/master/rados/configuration/ceph-conf/#monitor-configuration-database func (m *MonStore) Set(who, option, value string) error { logger.Infof("setting %q=%q=%q option to the mon configuration database", who, option, value) args := []string{"config", "set", who, normalizeKey(option), value} cephCmd := client.NewCephCommand(m.context, m.clusterInfo, args) - out, err := cephCmd.Run() + out, err := cephCmd.RunWithTimeout(exec.CephCommandsTimeout) if err != nil { return errors.Wrapf(err, "failed to set ceph config in the centralized mon configuration database; "+ "you may need to use the rook-config-override ConfigMap. 
output: %s", string(out)) @@ -73,7 +90,7 @@ func (m *MonStore) Delete(who, option string) error { logger.Infof("deleting %q option from the mon configuration database", option) args := []string{"config", "rm", who, normalizeKey(option)} cephCmd := client.NewCephCommand(m.context, m.clusterInfo, args) - out, err := cephCmd.Run() + out, err := cephCmd.RunWithTimeout(exec.CephCommandsTimeout) if err != nil { return errors.Wrapf(err, "failed to delete ceph config in the centralized mon configuration database. output: %s", string(out)) @@ -88,7 +105,7 @@ func (m *MonStore) Delete(who, option string) error { func (m *MonStore) Get(who, option string) (string, error) { args := []string{"config", "get", who, normalizeKey(option)} cephCmd := client.NewCephCommand(m.context, m.clusterInfo, args) - out, err := cephCmd.Run() + out, err := cephCmd.RunWithTimeout(exec.CephCommandsTimeout) if err != nil { return "", errors.Wrapf(err, "failed to get config setting %q for user %q", option, who) } @@ -99,7 +116,7 @@ func (m *MonStore) Get(who, option string) (string, error) { func (m *MonStore) GetDaemon(who string) ([]Option, error) { args := []string{"config", "get", who} cephCmd := client.NewCephCommand(m.context, m.clusterInfo, args) - out, err := cephCmd.Run() + out, err := cephCmd.RunWithTimeout(exec.CephCommandsTimeout) if err != nil { return []Option{}, errors.Wrapf(err, "failed to get config for daemon %q. output: %s", who, string(out)) } diff --git a/pkg/operator/ceph/config/monstore_test.go b/pkg/operator/ceph/config/monstore_test.go index 1d0978a26ae5..2d9bd74f682d 100644 --- a/pkg/operator/ceph/config/monstore_test.go +++ b/pkg/operator/ceph/config/monstore_test.go @@ -20,6 +20,7 @@ import ( "reflect" "strings" "testing" + "time" "github.com/pkg/errors" "github.com/rook/rook/pkg/clusterd" @@ -41,8 +42,8 @@ func TestMonStore_Set(t *testing.T) { // us to cause it to return an error when it detects a keyword. execedCmd := "" execInjectErr := false - executor.MockExecuteCommandWithOutput = - func(command string, args ...string) (string, error) { + executor.MockExecuteCommandWithTimeout = + func(timeout time.Duration, command string, args ...string) (string, error) { execedCmd = command + " " + strings.Join(args, " ") if execInjectErr { return "output from cmd with error", errors.New("mocked error") @@ -86,8 +87,8 @@ func TestMonStore_Delete(t *testing.T) { // us to cause it to return an error when it detects a keyword. 
execedCmd := "" execInjectErr := false - executor.MockExecuteCommandWithOutput = - func(command string, args ...string) (string, error) { + executor.MockExecuteCommandWithTimeout = + func(timeout time.Duration, command string, args ...string) (string, error) { execedCmd = command + " " + strings.Join(args, " ") if execInjectErr { return "output from cmd with error", errors.New("mocked error") @@ -125,8 +126,8 @@ func TestMonStore_GetDaemon(t *testing.T) { "\"rgw_enable_usage_log\":{\"value\":\"true\",\"section\":\"client.rgw.test.a\",\"mask\":{}," + "\"can_update_at_runtime\":true}}" execInjectErr := false - executor.MockExecuteCommandWithOutput = - func(command string, args ...string) (string, error) { + executor.MockExecuteCommandWithTimeout = + func(timeout time.Duration, command string, args ...string) (string, error) { execedCmd = command + " " + strings.Join(args, " ") if execInjectErr { return "output from cmd with error", errors.New("mocked error") @@ -171,8 +172,8 @@ func TestMonStore_DeleteDaemon(t *testing.T) { "\"can_update_at_runtime\":true}," + "\"rgw_enable_usage_log\":{\"value\":\"true\",\"section\":\"client.rgw.test.a\",\"mask\":{}," + "\"can_update_at_runtime\":true}}" - executor.MockExecuteCommandWithOutput = - func(command string, args ...string) (string, error) { + executor.MockExecuteCommandWithTimeout = + func(timeout time.Duration, command string, args ...string) (string, error) { execedCmd = command + " " + strings.Join(args, " ") return execReturn, nil } @@ -197,8 +198,8 @@ func TestMonStore_SetAll(t *testing.T) { // us to cause it to return an error when it detects a keyword. execedCmds := []string{} execInjectErrOnKeyword := "donotinjectanerror" - executor.MockExecuteCommandWithOutput = - func(command string, args ...string) (string, error) { + executor.MockExecuteCommandWithTimeout = + func(timeout time.Duration, command string, args ...string) (string, error) { execedCmd := command + " " + strings.Join(args, " ") execedCmds = append(execedCmds, execedCmd) k := execInjectErrOnKeyword diff --git a/pkg/operator/ceph/controller/controller_utils.go b/pkg/operator/ceph/controller/controller_utils.go index 7556e539bbc0..cc74e83e0be7 100644 --- a/pkg/operator/ceph/controller/controller_utils.go +++ b/pkg/operator/ceph/controller/controller_utils.go @@ -21,12 +21,14 @@ import ( "errors" "fmt" "reflect" + "strconv" "strings" "time" cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" "github.com/rook/rook/pkg/clusterd" "github.com/rook/rook/pkg/operator/k8sutil" + "github.com/rook/rook/pkg/util/exec" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" @@ -81,6 +83,17 @@ func DiscoveryDaemonEnabled(context *clusterd.Context) bool { return value == "true" } +// SetCephCommandsTimeout sets the timeout value of Ceph commands which are executed from Rook +func SetCephCommandsTimeout(context *clusterd.Context) { + strTimeoutSeconds, _ := k8sutil.GetOperatorSetting(context.Clientset, OperatorSettingConfigMapName, "ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS", "15") + timeoutSeconds, err := strconv.Atoi(strTimeoutSeconds) + if err != nil || timeoutSeconds < 1 { + logger.Warningf("ROOK_CEPH_COMMANDS_TIMEOUT is %q but it should be >= 1, set the default value 15", strTimeoutSeconds) + timeoutSeconds = 15 + } + exec.CephCommandsTimeout = time.Duration(timeoutSeconds) * time.Second +} + // CheckForCancelledOrchestration checks whether a cancellation has been requested func CheckForCancelledOrchestration(context 
*clusterd.Context) error { defer context.RequestCancelOrchestration.UnSet() @@ -113,7 +126,7 @@ func canIgnoreHealthErrStatusInReconcile(cephCluster cephv1.CephCluster, control } // IsReadyToReconcile determines if a controller is ready to reconcile or not -func IsReadyToReconcile(c client.Client, clustercontext *clusterd.Context, namespacedName types.NamespacedName, controllerName string) (cephv1.CephCluster, bool, bool, reconcile.Result) { +func IsReadyToReconcile(c client.Client, namespacedName types.NamespacedName, controllerName string) (cephv1.CephCluster, bool, bool, reconcile.Result) { cephClusterExists := false // Running ceph commands won't work and the controller will keep re-queuing so I believe it's fine not to check @@ -129,9 +142,15 @@ func IsReadyToReconcile(c client.Client, clustercontext *clusterd.Context, names logger.Debugf("%q: no CephCluster resource found in namespace %q", controllerName, namespacedName.Namespace) return cephCluster, false, cephClusterExists, WaitForRequeueIfCephClusterNotReady } - cephClusterExists = true cephCluster = clusterList.Items[0] + // If the cluster has a cleanup policy to destroy the cluster and it has been marked for deletion, treat it as if it does not exist + if cephCluster.Spec.CleanupPolicy.HasDataDirCleanPolicy() && !cephCluster.DeletionTimestamp.IsZero() { + logger.Infof("%q: CephCluster %q has a destructive cleanup policy, allowing resources to be deleted", controllerName, namespacedName) + return cephCluster, false, cephClusterExists, WaitForRequeueIfCephClusterNotReady + } + + cephClusterExists = true logger.Debugf("%q: CephCluster resource %q found in namespace %q", controllerName, cephCluster.Name, namespacedName.Namespace) // read the CR status of the cluster diff --git a/pkg/operator/ceph/controller/controller_utils_test.go b/pkg/operator/ceph/controller/controller_utils_test.go index 0fbdb1649f35..c83f66dc2f5e 100644 --- a/pkg/operator/ceph/controller/controller_utils_test.go +++ b/pkg/operator/ceph/controller/controller_utils_test.go @@ -17,10 +17,21 @@ limitations under the License. 
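A sketch of how the new timeout plumbing is expected to be wired at operator startup (the call site is assumed, not shown in this diff): SetCephCommandsTimeout resolves ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS once, falling back to 15 seconds for missing, non-numeric, or sub-second values, and the RunWithTimeout(exec.CephCommandsTimeout) callers above pick it up.

```go
package example

import (
	"fmt"

	"github.com/rook/rook/pkg/clusterd"
	opcontroller "github.com/rook/rook/pkg/operator/ceph/controller"
	"github.com/rook/rook/pkg/util/exec"
)

// configureCephCommandTimeout is an illustrative startup hook, not code from the patch.
func configureCephCommandTimeout(c *clusterd.Context) {
	// Reads ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS from the operator settings
	// (rook-ceph-operator-config) and defaults to 15s when the value is
	// missing, not a number, or below 1.
	opcontroller.SetCephCommandsTimeout(c)
	fmt.Printf("ceph commands will time out after %s\n", exec.CephCommandsTimeout)
}
```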
package controller import ( + "context" "testing" + "time" cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + "github.com/rook/rook/pkg/client/clientset/versioned/scheme" + "github.com/rook/rook/pkg/clusterd" + "github.com/rook/rook/pkg/util/exec" "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + kfake "k8s.io/client-go/kubernetes/fake" + "sigs.k8s.io/controller-runtime/pkg/client/fake" ) func CreateTestClusterFromStatusDetails(details map[string]cephv1.CephHealthMessage) cephv1.CephCluster { @@ -70,3 +81,115 @@ func TestCanIgnoreHealthErrStatusInReconcile(t *testing.T) { }) assert.False(t, canIgnoreHealthErrStatusInReconcile(cluster, "controller")) } + +func TestSetCephCommandsTimeout(t *testing.T) { + clientset := kfake.NewSimpleClientset() + ctx := context.TODO() + cm := &v1.ConfigMap{} + cm.Name = "rook-ceph-operator-config" + _, err := clientset.CoreV1().ConfigMaps("").Create(ctx, cm, metav1.CreateOptions{}) + assert.NoError(t, err) + context := &clusterd.Context{Clientset: clientset} + + SetCephCommandsTimeout(context) + assert.Equal(t, 15*time.Second, exec.CephCommandsTimeout) + + exec.CephCommandsTimeout = 0 + cm.Data = map[string]string{"ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS": "0"} + _, err = clientset.CoreV1().ConfigMaps("").Update(ctx, cm, metav1.UpdateOptions{}) + assert.NoError(t, err) + SetCephCommandsTimeout(context) + assert.Equal(t, 15*time.Second, exec.CephCommandsTimeout) + + exec.CephCommandsTimeout = 0 + cm.Data = map[string]string{"ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS": "1"} + _, err = clientset.CoreV1().ConfigMaps("").Update(ctx, cm, metav1.UpdateOptions{}) + assert.NoError(t, err) + SetCephCommandsTimeout(context) + assert.Equal(t, 1*time.Second, exec.CephCommandsTimeout) +} + +func TestIsReadyToReconcile(t *testing.T) { + scheme := scheme.Scheme + scheme.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephCluster{}, &cephv1.CephClusterList{}) + + controllerName := "testing" + clusterName := types.NamespacedName{Name: "mycluster", Namespace: "myns"} + + t.Run("non-existent cephcluster", func(t *testing.T) { + client := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects().Build() + c, ready, clusterExists, reconcileResult := IsReadyToReconcile(client, clusterName, controllerName) + assert.NotNil(t, c) + assert.False(t, ready) + assert.False(t, clusterExists) + assert.Equal(t, WaitForRequeueIfCephClusterNotReady, reconcileResult) + }) + + t.Run("valid cephcluster", func(t *testing.T) { + cephCluster := &cephv1.CephCluster{} + objects := []runtime.Object{cephCluster} + client := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(objects...).Build() + c, ready, clusterExists, reconcileResult := IsReadyToReconcile(client, clusterName, controllerName) + assert.NotNil(t, c) + assert.False(t, ready) + assert.False(t, clusterExists) + assert.Equal(t, WaitForRequeueIfCephClusterNotReady, reconcileResult) + }) + + t.Run("deleted cephcluster with no cleanup policy", func(t *testing.T) { + cephCluster := &cephv1.CephCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName.Name, + Namespace: clusterName.Namespace, + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + }, + } + + objects := []runtime.Object{cephCluster} + client := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(objects...).Build() + c, ready, clusterExists, reconcileResult := IsReadyToReconcile(client, clusterName, controllerName) + 
assert.NotNil(t, c) + assert.False(t, ready) + assert.True(t, clusterExists) + assert.Equal(t, WaitForRequeueIfCephClusterNotReady, reconcileResult) + }) + + t.Run("cephcluster with cleanup policy when not deleted", func(t *testing.T) { + cephCluster := &cephv1.CephCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName.Name, + Namespace: clusterName.Namespace, + }, + Spec: cephv1.ClusterSpec{ + CleanupPolicy: cephv1.CleanupPolicySpec{ + Confirmation: cephv1.DeleteDataDirOnHostsConfirmation, + }, + }} + objects := []runtime.Object{cephCluster} + client := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(objects...).Build() + c, ready, clusterExists, _ := IsReadyToReconcile(client, clusterName, controllerName) + assert.NotNil(t, c) + assert.False(t, ready) + assert.True(t, clusterExists) + }) + + t.Run("cephcluster with cleanup policy when deleted", func(t *testing.T) { + cephCluster := &cephv1.CephCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName.Name, + Namespace: clusterName.Namespace, + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + }, + Spec: cephv1.ClusterSpec{ + CleanupPolicy: cephv1.CleanupPolicySpec{ + Confirmation: cephv1.DeleteDataDirOnHostsConfirmation, + }, + }} + objects := []runtime.Object{cephCluster} + client := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(objects...).Build() + c, ready, clusterExists, _ := IsReadyToReconcile(client, clusterName, controllerName) + assert.NotNil(t, c) + assert.False(t, ready) + assert.False(t, clusterExists) + }) +} diff --git a/pkg/operator/ceph/controller/mirror_peer.go b/pkg/operator/ceph/controller/mirror_peer.go index 163bab20be90..d033488e9bd6 100644 --- a/pkg/operator/ceph/controller/mirror_peer.go +++ b/pkg/operator/ceph/controller/mirror_peer.go @@ -18,7 +18,8 @@ limitations under the License. 
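For dependent controllers, the reconcile prologue now drops the clusterd.Context argument; a hedged sketch of the call pattern (the wrapper function is illustrative):

```go
package example

import (
	opcontroller "github.com/rook/rook/pkg/operator/ceph/controller"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// reconcilePrologue mirrors how controllers call the helper after this change:
// no clusterd.Context argument anymore.
func reconcilePrologue(c client.Client, req reconcile.Request, controllerName string) (reconcile.Result, bool) {
	_, isReady, _, requeue := opcontroller.IsReadyToReconcile(c, req.NamespacedName, controllerName)
	if !isReady {
		// Not ready also covers a CephCluster being deleted with a destructive
		// cleanup policy (the cephClusterExists return value is false in that
		// case), letting dependent resources finalize instead of requeueing forever.
		return requeue, false
	}
	return reconcile.Result{}, true
}
```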
package controller import ( - "context" + "encoding/base64" + "encoding/json" "fmt" "github.com/pkg/errors" @@ -29,10 +30,7 @@ import ( v1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) @@ -41,25 +39,15 @@ const ( poolMirrorBoostrapPeerSecretName = "pool-peer-token" // #nosec G101 since this is not leaking any hardcoded credentials, it's just the prefix of the secret name fsMirrorBoostrapPeerSecretName = "fs-peer-token" + // #nosec G101 since this is not leaking any hardcoded credentials, it's just the prefix of the secret name + clusterMirrorBoostrapPeerSecretName = "cluster-peer-token" // RBDMirrorBootstrapPeerSecretName #nosec G101 since this is not leaking any hardcoded credentials, it's just the prefix of the secret name RBDMirrorBootstrapPeerSecretName = "rbdMirrorBootstrapPeerSecretName" // FSMirrorBootstrapPeerSecretName #nosec G101 since this is not leaking any hardcoded credentials, it's just the prefix of the secret name FSMirrorBootstrapPeerSecretName = "fsMirrorBootstrapPeerSecretName" ) -// PeerToken is the content of the peer token -type PeerToken struct { - ClusterFSID string `json:"fsid"` - ClientID string `json:"client_id"` - Key string `json:"key"` - MonHost string `json:"mon_host"` - // These fields are added by Rook and NOT part of the output of client.CreateRBDMirrorBootstrapPeer() - PoolID int `json:"pool_id"` - Namespace string `json:"namespace"` -} - -func CreateBootstrapPeerSecret(ctx *clusterd.Context, clusterInfo *cephclient.ClusterInfo, object client.Object, namespacedName types.NamespacedName, scheme *runtime.Scheme) (reconcile.Result, error) { - context := context.TODO() +func CreateBootstrapPeerSecret(ctx *clusterd.Context, clusterInfo *cephclient.ClusterInfo, object client.Object, ownerInfo *k8sutil.OwnerInfo) (reconcile.Result, error) { var err error var ns, name, daemonType string var boostrapToken []byte @@ -73,6 +61,28 @@ func CreateBootstrapPeerSecret(ctx *clusterd.Context, clusterInfo *cephclient.Cl if err != nil { return ImmediateRetryResult, errors.Wrapf(err, "failed to create %s-mirror bootstrap peer", daemonType) } + + // Add additional information to the peer token + boostrapToken, err = expandBootstrapPeerToken(ctx, clusterInfo, boostrapToken) + if err != nil { + return ImmediateRetryResult, errors.Wrap(err, "failed to add extra information to rbd-mirror bootstrap peer") + } + + case *cephv1.CephCluster: + ns = objectType.Namespace + daemonType = "cluster-rbd" + // Create rbd mirror bootstrap peer token + boostrapToken, err = cephclient.CreateRBDMirrorBootstrapPeerWithoutPool(ctx, clusterInfo) + if err != nil { + return ImmediateRetryResult, errors.Wrapf(err, "failed to create %s-mirror bootstrap peer", daemonType) + } + + // Add additional information to the peer token + boostrapToken, err = expandBootstrapPeerToken(ctx, clusterInfo, boostrapToken) + if err != nil { + return ImmediateRetryResult, errors.Wrap(err, "failed to add extra information to rbd-mirror bootstrap peer") + } + case *cephv1.CephFilesystem: ns = objectType.Namespace name = objectType.Name @@ -81,6 +91,7 @@ func CreateBootstrapPeerSecret(ctx *clusterd.Context, clusterInfo *cephclient.Cl if err != nil { return ImmediateRetryResult, errors.Wrapf(err, "failed to create %s-mirror bootstrap 
peer", daemonType) } + default: return ImmediateRetryResult, errors.Wrap(err, "failed to create bootstrap peer unknown daemon type") } @@ -89,14 +100,14 @@ func CreateBootstrapPeerSecret(ctx *clusterd.Context, clusterInfo *cephclient.Cl s := GenerateBootstrapPeerSecret(object, boostrapToken) // set ownerref to the Secret - err = controllerutil.SetControllerReference(object, s, scheme) + err = ownerInfo.SetControllerReference(s) if err != nil { return ImmediateRetryResult, errors.Wrapf(err, "failed to set owner reference for %s-mirror bootstrap peer secret %q", daemonType, s.Name) } // Create Secret - logger.Debugf("store %s-mirror bootstrap token in a Kubernetes Secret %q", daemonType, s.Name) - _, err = ctx.Clientset.CoreV1().Secrets(ns).Create(context, s, metav1.CreateOptions{}) + logger.Debugf("store %s-mirror bootstrap token in a Kubernetes Secret %q in namespace %q", daemonType, s.Name, ns) + _, err = k8sutil.CreateOrUpdateSecret(ctx.Clientset, s) if err != nil && !kerrors.IsAlreadyExists(err) { return ImmediateRetryResult, errors.Wrapf(err, "failed to create %s-mirror bootstrap peer %q secret", daemonType, s.Name) } @@ -117,6 +128,10 @@ func GenerateBootstrapPeerSecret(object client.Object, token []byte) *v1.Secret entityType = "pool" entityName = objectType.Name entityNamespace = objectType.Namespace + case *cephv1.CephCluster: + entityType = "cluster" + entityName = objectType.Name + entityNamespace = objectType.Namespace } s := &v1.Secret{ @@ -140,6 +155,8 @@ func buildBoostrapPeerSecretName(object client.Object) string { return fmt.Sprintf("%s-%s", fsMirrorBoostrapPeerSecretName, objectType.Name) case *cephv1.CephBlockPool: return fmt.Sprintf("%s-%s", poolMirrorBoostrapPeerSecretName, objectType.Name) + case *cephv1.CephCluster: + return fmt.Sprintf("%s-%s", clusterMirrorBoostrapPeerSecretName, objectType.Name) } return "" @@ -166,7 +183,7 @@ func ValidatePeerToken(object client.Object, data map[string][]byte) error { // Lookup Secret keys and content keysToTest := []string{"token"} switch object.(type) { - case *cephv1.CephBlockPool: + case *cephv1.CephRBDMirror: keysToTest = append(keysToTest, "pool") } @@ -179,3 +196,29 @@ func ValidatePeerToken(object client.Object, data map[string][]byte) error { return nil } + +func expandBootstrapPeerToken(ctx *clusterd.Context, clusterInfo *cephclient.ClusterInfo, token []byte) ([]byte, error) { + // First decode the token, it's base64 encoded + decodedToken, err := base64.StdEncoding.DecodeString(string(token)) + if err != nil { + return nil, errors.Wrap(err, "failed to decode bootstrap peer token") + } + + // Unmarshal the decoded value to a Go type + var decodedTokenToGo cephclient.PeerToken + err = json.Unmarshal(decodedToken, &decodedTokenToGo) + if err != nil { + return nil, errors.Wrap(err, "failed to unmarshal decoded token") + } + + decodedTokenToGo.Namespace = clusterInfo.Namespace + + // Marshal the Go type back to JSON + decodedTokenBackToJSON, err := json.Marshal(decodedTokenToGo) + if err != nil { + return nil, errors.Wrap(err, "failed to encode go type back to json") + } + + // Return the base64 encoded token + return []byte(base64.StdEncoding.EncodeToString(decodedTokenBackToJSON)), nil +} diff --git a/pkg/operator/ceph/controller/mirror_peer_test.go b/pkg/operator/ceph/controller/mirror_peer_test.go index fe4bb47fc810..236b966dde8d 100644 --- a/pkg/operator/ceph/controller/mirror_peer_test.go +++ b/pkg/operator/ceph/controller/mirror_peer_test.go @@ -18,17 +18,22 @@ limitations under the License. 
package controller import ( + "encoding/base64" "reflect" "testing" + "github.com/pkg/errors" cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + "github.com/rook/rook/pkg/clusterd" + cephclient "github.com/rook/rook/pkg/daemon/ceph/client" + exectest "github.com/rook/rook/pkg/util/exec/test" "github.com/stretchr/testify/assert" "sigs.k8s.io/controller-runtime/pkg/client" ) func TestValidatePeerToken(t *testing.T) { // Error: map is empty - b := &cephv1.CephBlockPool{} + b := &cephv1.CephRBDMirror{} data := map[string][]byte{} err := ValidatePeerToken(b, data) assert.Error(t, err) @@ -43,13 +48,18 @@ func TestValidatePeerToken(t *testing.T) { err = ValidatePeerToken(b, data) assert.Error(t, err) - // Success CephBlockPool + // Success CephRBDMirror data["pool"] = []byte("foo") err = ValidatePeerToken(b, data) assert.NoError(t, err) // Success CephFilesystem - data["pool"] = []byte("foo") + // "pool" is not required here + delete(data, "pool") + err = ValidatePeerToken(&cephv1.CephFilesystemMirror{}, data) + assert.NoError(t, err) + + // Success CephFilesystem err = ValidatePeerToken(&cephv1.CephFilesystemMirror{}, data) assert.NoError(t, err) } @@ -73,3 +83,24 @@ func TestGenerateStatusInfo(t *testing.T) { }) } } + +func TestExpandBootstrapPeerToken(t *testing.T) { + executor := &exectest.MockExecutor{ + MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { + if reflect.DeepEqual(args[0:5], []string{"osd", "pool", "get", "pool", "all"}) { + return `{"pool_id":13}`, nil + } + + return "", errors.Errorf("unknown command args: %s", args[0:5]) + }, + } + c := &clusterd.Context{ + Executor: executor, + } + + newToken, err := expandBootstrapPeerToken(c, cephclient.AdminClusterInfo("mu-cluster"), []byte(`eyJmc2lkIjoiYzZiMDg3ZjItNzgyOS00ZGJiLWJjZmMtNTNkYzM0ZTBiMzVkIiwiY2xpZW50X2lkIjoicmJkLW1pcnJvci1wZWVyIiwia2V5IjoiQVFBV1lsWmZVQ1Q2RGhBQVBtVnAwbGtubDA5YVZWS3lyRVV1NEE9PSIsIm1vbl9ob3N0IjoiW3YyOjE5Mi4xNjguMTExLjEwOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTA6Njc4OV0sW3YyOjE5Mi4xNjguMTExLjEyOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTI6Njc4OV0sW3YyOjE5Mi4xNjguMTExLjExOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTE6Njc4OV0ifQ==`)) + assert.NoError(t, err) + newTokenDecoded, err := base64.StdEncoding.DecodeString(string(newToken)) + assert.NoError(t, err) + assert.Contains(t, string(newTokenDecoded), "namespace") +} diff --git a/pkg/operator/ceph/controller/spec.go b/pkg/operator/ceph/controller/spec.go index e5baa2f1c30b..a076f5e6c804 100644 --- a/pkg/operator/ceph/controller/spec.go +++ b/pkg/operator/ceph/controller/spec.go @@ -67,8 +67,6 @@ var logger = capnslog.NewPackageLogger("github.com/rook/rook", "ceph-spec") var ( cronLogRotate = ` -set -xe - CEPH_CLIENT_ID=%s PERIODICITY=%s LOG_ROTATE_CEPH_FILE=/etc/logrotate.d/ceph @@ -607,12 +605,13 @@ func (c *daemonConfig) buildAdminSocketCommand() string { return command } +func HostPathRequiresPrivileged() bool { + return os.Getenv("ROOK_HOSTPATH_REQUIRES_PRIVILEGED") == "true" +} + // PodSecurityContext detects if the pod needs privileges to run func PodSecurityContext() *v1.SecurityContext { - privileged := false - if os.Getenv("ROOK_HOSTPATH_REQUIRES_PRIVILEGED") == "true" { - privileged = true - } + privileged := HostPathRequiresPrivileged() return &v1.SecurityContext{ Privileged: &privileged, @@ -625,13 +624,18 @@ func LogCollectorContainer(daemonID, ns string, c cephv1.ClusterSpec) *v1.Contai Name: logCollector, Command: []string{ "/bin/bash", - "-c", + "-x", // Print commands and their arguments as they are executed + "-e", // Exit immediately if a 
command exits with a non-zero status. + "-m", // Terminal job control, allows job to be terminated by SIGTERM + "-c", // Command to run fmt.Sprintf(cronLogRotate, daemonID, c.LogCollector.Periodicity), }, Image: c.CephVersion.Image, VolumeMounts: DaemonVolumeMounts(config.NewDatalessDaemonDataPathMap(ns, c.DataDirHostPath), ""), SecurityContext: PodSecurityContext(), Resources: cephv1.GetLogCollectorResources(c.Resources), + // We need a TTY for the bash job control (enabled by -m) + TTY: true, } } diff --git a/pkg/operator/ceph/csi/betav1csidriver.go b/pkg/operator/ceph/csi/betav1csidriver.go index 3d33ecd144e6..4d98941ab5ad 100644 --- a/pkg/operator/ceph/csi/betav1csidriver.go +++ b/pkg/operator/ceph/csi/betav1csidriver.go @@ -72,6 +72,8 @@ func (d beta1CsiDriver) createCSIDriverInfo(ctx context.Context, clientset kuber // As FSGroupPolicy field is immutable, should be set only during create time. // if the request is to change the FSGroupPolicy, we are deleting the CSIDriver object and creating it. if driver.Spec.FSGroupPolicy != nil && csiDriver.Spec.FSGroupPolicy != nil && *driver.Spec.FSGroupPolicy != *csiDriver.Spec.FSGroupPolicy { + d.csiClient = csidrivers + d.csiDriver = csiDriver return d.reCreateCSIDriverInfo(ctx) } @@ -88,18 +90,16 @@ func (d beta1CsiDriver) createCSIDriverInfo(ctx context.Context, clientset kuber } func (d beta1CsiDriver) reCreateCSIDriverInfo(ctx context.Context) error { - csiDriver := d.csiDriver - csiClient := d.csiClient - err := csiClient.Delete(ctx, csiDriver.Name, metav1.DeleteOptions{}) + err := d.csiClient.Delete(ctx, d.csiDriver.Name, metav1.DeleteOptions{}) if err != nil { - return errors.Wrapf(err, "failed to delete CSIDriver object for driver %q", csiDriver.Name) + return errors.Wrapf(err, "failed to delete CSIDriver object for driver %q", d.csiDriver.Name) } - logger.Infof("CSIDriver object deleted for driver %q", csiDriver.Name) - _, err = csiClient.Create(ctx, csiDriver, metav1.CreateOptions{}) + logger.Infof("CSIDriver object deleted for driver %q", d.csiDriver.Name) + _, err = d.csiClient.Create(ctx, d.csiDriver, metav1.CreateOptions{}) if err != nil { - return errors.Wrapf(err, "failed to recreate CSIDriver object for driver %q", csiDriver.Name) + return errors.Wrapf(err, "failed to recreate CSIDriver object for driver %q", d.csiDriver.Name) } - logger.Infof("CSIDriver object recreated for driver %q", csiDriver.Name) + logger.Infof("CSIDriver object recreated for driver %q", d.csiDriver.Name) return nil } diff --git a/pkg/operator/ceph/csi/csidriver.go b/pkg/operator/ceph/csi/csidriver.go index f302674627ea..610fa5d60e2d 100644 --- a/pkg/operator/ceph/csi/csidriver.go +++ b/pkg/operator/ceph/csi/csidriver.go @@ -72,6 +72,8 @@ func (d v1CsiDriver) createCSIDriverInfo(ctx context.Context, clientset kubernet // As FSGroupPolicy field is immutable, should be set only during create time. // if the request is to change the FSGroupPolicy, we are deleting the CSIDriver object and creating it. 
if driver.Spec.FSGroupPolicy != nil && csiDriver.Spec.FSGroupPolicy != nil && *driver.Spec.FSGroupPolicy != *csiDriver.Spec.FSGroupPolicy { + d.csiClient = csidrivers + d.csiDriver = csiDriver return d.reCreateCSIDriverInfo(ctx) } @@ -88,18 +90,16 @@ func (d v1CsiDriver) createCSIDriverInfo(ctx context.Context, clientset kubernet } func (d v1CsiDriver) reCreateCSIDriverInfo(ctx context.Context) error { - csiDriver := d.csiDriver - csiClient := d.csiClient - err := csiClient.Delete(ctx, csiDriver.Name, metav1.DeleteOptions{}) + err := d.csiClient.Delete(ctx, d.csiDriver.Name, metav1.DeleteOptions{}) if err != nil { - return errors.Wrapf(err, "failed to delete CSIDriver object for driver %q", csiDriver.Name) + return errors.Wrapf(err, "failed to delete CSIDriver object for driver %q", d.csiDriver.Name) } - logger.Infof("CSIDriver object deleted for driver %q", csiDriver.Name) - _, err = csiClient.Create(ctx, d.csiDriver, metav1.CreateOptions{}) + logger.Infof("CSIDriver object deleted for driver %q", d.csiDriver.Name) + _, err = d.csiClient.Create(ctx, d.csiDriver, metav1.CreateOptions{}) if err != nil { - return errors.Wrapf(err, "failed to recreate CSIDriver object for driver %q", csiDriver.Name) + return errors.Wrapf(err, "failed to recreate CSIDriver object for driver %q", d.csiDriver.Name) } - logger.Infof("CSIDriver object recreated for driver %q", csiDriver.Name) + logger.Infof("CSIDriver object recreated for driver %q", d.csiDriver.Name) return nil } diff --git a/pkg/operator/ceph/csi/peermap/config.go b/pkg/operator/ceph/csi/peermap/config.go new file mode 100644 index 000000000000..b7a8d6d7d48e --- /dev/null +++ b/pkg/operator/ceph/csi/peermap/config.go @@ -0,0 +1,368 @@ +/* +Copyright 2021 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package peermap + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "os" + "reflect" + "strconv" + + "github.com/coreos/pkg/capnslog" + "github.com/pkg/errors" + cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + "github.com/rook/rook/pkg/clusterd" + cephclient "github.com/rook/rook/pkg/daemon/ceph/client" + "github.com/rook/rook/pkg/operator/k8sutil" + "github.com/rook/rook/pkg/util" + "github.com/rook/rook/pkg/util/exec" + v1 "k8s.io/api/core/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +const ( + mappingConfigName = "rook-ceph-csi-mapping-config" + mappingConfigkey = "csi-mapping-config-json" +) + +var logger = capnslog.NewPackageLogger("github.com/rook/rook", "peer-map") + +type PeerIDMapping struct { + ClusterIDMapping map[string]string + RBDPoolIDMapping []map[string]string +} + +type PeerIDMappings []PeerIDMapping + +// addClusterIDMapping adds cluster ID map if not present already +func (m *PeerIDMappings) addClusterIDMapping(newClusterIDMap map[string]string) { + if m.clusterIDMapIndex(newClusterIDMap) == -1 { + newDRMap := PeerIDMapping{ + ClusterIDMapping: newClusterIDMap, + } + *m = append(*m, newDRMap) + } +} + +// addRBDPoolIDMapping adds all the pool ID maps for a given cluster ID map +func (m *PeerIDMappings) addRBDPoolIDMapping(clusterIDMap, newPoolIDMap map[string]string) { + for i := 0; i < len(*m); i++ { + if reflect.DeepEqual((*m)[i].ClusterIDMapping, clusterIDMap) { + (*m)[i].RBDPoolIDMapping = append((*m)[i].RBDPoolIDMapping, newPoolIDMap) + } + } +} + +// updateRBDPoolIDMapping updates the Pool ID mappings between local and peer cluster. +// It adds the cluster and Pool ID mappings if not present, else updates the pool ID map if required. 
+func (m *PeerIDMappings) updateRBDPoolIDMapping(newMappings PeerIDMapping) { + newClusterIDMap := newMappings.ClusterIDMapping + newPoolIDMap := newMappings.RBDPoolIDMapping[0] + peerPoolID, localPoolID := getMapKV(newPoolIDMap) + + // Append new mappings if no existing mappings are available + if len(*m) == 0 { + *m = append(*m, newMappings) + return + } + clusterIDMapExists := false + for i := 0; i < len(*m); i++ { + if reflect.DeepEqual((*m)[i].ClusterIDMapping, newClusterIDMap) { + clusterIDMapExists = true + poolIDMapUpdated := false + for j := 0; j < len((*m)[i].RBDPoolIDMapping); j++ { + existingPoolMap := (*m)[i].RBDPoolIDMapping[j] + if _, ok := existingPoolMap[peerPoolID]; ok { + poolIDMapUpdated = true + existingPoolMap[peerPoolID] = localPoolID + } + } + if !poolIDMapUpdated { + (*m)[i].RBDPoolIDMapping = append((*m)[i].RBDPoolIDMapping, newPoolIDMap) + } + } + } + + if !clusterIDMapExists { + *m = append(*m, newMappings) + } +} + +func (m *PeerIDMappings) clusterIDMapIndex(newClusterIDMap map[string]string) int { + for i, mapping := range *m { + if reflect.DeepEqual(mapping.ClusterIDMapping, newClusterIDMap) { + return i + } + } + return -1 +} + +func (m *PeerIDMappings) String() (string, error) { + mappingInBytes, err := json.Marshal(m) + if err != nil { + return "", errors.Wrap(err, "failed to marshal peer cluster mapping config") + } + + return string(mappingInBytes), nil +} + +func toObj(in string) (PeerIDMappings, error) { + var mappings PeerIDMappings + err := json.Unmarshal([]byte(in), &mappings) + if err != nil { + return mappings, errors.Wrap(err, "failed to unmarshal peer cluster mapping config") + } + + return mappings, nil +} + +func ReconcilePoolIDMap(clusterContext *clusterd.Context, clusterInfo *cephclient.ClusterInfo, pool *cephv1.CephBlockPool) error { + if pool.Spec.Mirroring.Peers == nil { + logger.Infof("no peer secrets added in ceph block pool %q. 
skipping pool ID mappings with peer cluster", pool.Name) + return nil + } + + mappings, err := getClusterPoolIDMap(clusterContext, clusterInfo, pool) + if err != nil { + return errors.Wrapf(err, "failed to get peer pool ID mappings for the pool %q", pool.Name) + } + + err = CreateOrUpdateConfig(clusterContext, mappings) + if err != nil { + return errors.Wrapf(err, "failed to create or update peer pool ID mappings configMap for the pool %q", pool.Name) + } + + logger.Infof("successfully updated config map with cluster and RBD pool ID mappings for the pool %q", pool.Name) + return nil +} + +// getClusterPoolIDMap returns a mapping between local and peer cluster ID, and between local and peer pool ID +func getClusterPoolIDMap(clusterContext *clusterd.Context, clusterInfo *cephclient.ClusterInfo, pool *cephv1.CephBlockPool) (*PeerIDMappings, error) { + mappings := &PeerIDMappings{} + + // Get local cluster pool details + localPoolDetails, err := cephclient.GetPoolDetails(clusterContext, clusterInfo, pool.Name) + if err != nil { + return mappings, errors.Wrapf(err, "failed to get details for the pool %q", pool.Name) + } + + logger.Debugf("pool details of local cluster %+v", localPoolDetails) + + for _, peerSecret := range pool.Spec.Mirroring.Peers.SecretNames { + s, err := clusterContext.Clientset.CoreV1().Secrets(clusterInfo.Namespace).Get(context.TODO(), peerSecret, metav1.GetOptions{}) + if err != nil { + return mappings, errors.Wrapf(err, "failed to fetch kubernetes secret %q bootstrap peer", peerSecret) + } + + token := s.Data["token"] + decodedTokenToGo, err := decodePeerToken(string(token)) + if err != nil { + return mappings, errors.Wrap(err, "failed to decode bootstrap peer token") + } + + peerClientName := fmt.Sprintf("client.%s", decodedTokenToGo.ClientID) + credentials := cephclient.CephCred{ + Username: peerClientName, + Secret: decodedTokenToGo.Key, + } + + // Add cluster ID mappings + clusterIDMapping := map[string]string{ + decodedTokenToGo.Namespace: clusterInfo.Namespace, + } + + mappings.addClusterIDMapping(clusterIDMapping) + + // Generate peer cluster keyring in a temporary file + keyring := cephclient.CephKeyring(credentials) + keyringFile, err := util.CreateTempFile(keyring) + if err != nil { + return mappings, errors.Wrap(err, "failed to create a temp keyring file") + } + defer os.Remove(keyringFile.Name()) + + // Generate an empty config file to be passed as `--conf` argument in ceph CLI + configFile, err := util.CreateTempFile("") + if err != nil { + return mappings, errors.Wrap(err, "failed to create a temp config file") + } + defer os.Remove(configFile.Name()) + + // Build command + args := []string{"osd", "pool", "get", pool.Name, "all", + fmt.Sprintf("--cluster=%s", decodedTokenToGo.Namespace), + fmt.Sprintf("--conf=%s", configFile.Name()), + fmt.Sprintf("--fsid=%s", decodedTokenToGo.ClusterFSID), + fmt.Sprintf("--mon-host=%s", decodedTokenToGo.MonHost), + fmt.Sprintf("--keyring=%s", keyringFile.Name()), + fmt.Sprintf("--name=%s", peerClientName), + "--format", "json", + } + + // Get peer cluster pool details + peerPoolDetails, err := getPeerPoolDetails(clusterContext, args...) 
+ if err != nil { + return mappings, errors.Wrapf(err, "failed to get pool details from peer cluster %q", decodedTokenToGo.Namespace) + } + + logger.Debugf("pool details from peer cluster %+v", peerPoolDetails) + + // Add Pool ID mappings + poolIDMapping := map[string]string{ + strconv.Itoa(peerPoolDetails.Number): strconv.Itoa(localPoolDetails.Number), + } + mappings.addRBDPoolIDMapping(clusterIDMapping, poolIDMapping) + } + + return mappings, nil +} + +func CreateOrUpdateConfig(clusterContext *clusterd.Context, mappings *PeerIDMappings) error { + ctx := context.TODO() + data, err := mappings.String() + if err != nil { + return errors.Wrap(err, "failed to convert peer cluster mappings struct to string") + } + + opNamespace := os.Getenv(k8sutil.PodNamespaceEnvVar) + request := types.NamespacedName{Name: mappingConfigName, Namespace: opNamespace} + existingConfigMap := &v1.ConfigMap{} + + err = clusterContext.Client.Get(ctx, request, existingConfigMap) + if err != nil { + if kerrors.IsNotFound(err) { + // Create new configMap + return createConfig(clusterContext, request, data) + } + return errors.Wrapf(err, "failed to get existing mapping config map %q", existingConfigMap.Name) + } + + existingCMData := existingConfigMap.Data[mappingConfigkey] + if existingCMData == "[]" { + existingConfigMap.Data[mappingConfigkey] = data + } else { + existingMappings, err := toObj(existingCMData) + if err != nil { + return errors.Wrapf(err, "failed to extract existing mapping data from the config map %q", existingConfigMap.Name) + } + updatedCMData, err := UpdateExistingData(&existingMappings, mappings) + if err != nil { + return errors.Wrapf(err, "failed to update existing mapping data from the config map %q", existingConfigMap.Name) + } + existingConfigMap.Data[mappingConfigkey] = updatedCMData + } + + // Update existing configMap + if err := clusterContext.Client.Update(ctx, existingConfigMap); err != nil { + return errors.Wrapf(err, "failed to update existing mapping config map %q", existingConfigMap.Name) + } + + return nil +} + +func UpdateExistingData(existingMappings, newMappings *PeerIDMappings) (string, error) { + for i, mapping := range *newMappings { + if len(mapping.RBDPoolIDMapping) == 0 { + logger.Warning("no pool ID mapping available between local and peer cluster") + continue + } + existingMappings.updateRBDPoolIDMapping((*newMappings)[i]) + } + + data, err := existingMappings.String() + if err != nil { + return "", errors.Wrap(err, "failed to convert peer cluster mappings struct to string") + } + return data, nil +} + +func createConfig(clusterContext *clusterd.Context, request types.NamespacedName, data string) error { + newConfigMap := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: request.Name, + Namespace: request.Namespace, + }, + Data: map[string]string{ + mappingConfigkey: data, + }, + } + + // Get Operator owner reference + operatorPodName := os.Getenv(k8sutil.PodNameEnvVar) + ownerRef, err := k8sutil.GetDeploymentOwnerReference(clusterContext.Clientset, operatorPodName, request.Namespace) + if err != nil { + return errors.Wrap(err, "failed to get operator owner reference") + } + if ownerRef != nil { + blockOwnerDeletion := false + ownerRef.BlockOwnerDeletion = &blockOwnerDeletion + } + + ownerInfo := k8sutil.NewOwnerInfoWithOwnerRef(ownerRef, request.Namespace) + + // Set controller reference only when creating the configMap for the first time + err = ownerInfo.SetControllerReference(newConfigMap) + if err != nil { + return errors.Wrapf(err, "failed to set owner 
reference on configMap %q", newConfigMap.Name) + } + + err = clusterContext.Client.Create(context.TODO(), newConfigMap) + if err != nil { + return errors.Wrapf(err, "failed to create mapping configMap %q", newConfigMap.Name) + } + return nil +} + +func decodePeerToken(token string) (*cephclient.PeerToken, error) { + // decode the base64 encoded token + decodedToken, err := base64.StdEncoding.DecodeString(string(token)) + if err != nil { + return nil, errors.Wrap(err, "failed to decode bootstrap peer token") + } + + // Unmarshal the decoded token to a Go type + var decodedTokenToGo cephclient.PeerToken + err = json.Unmarshal(decodedToken, &decodedTokenToGo) + if err != nil { + return nil, errors.Wrap(err, "failed to unmarshal decoded token") + } + + logger.Debugf("peer cluster info %+v", decodedTokenToGo) + + return &decodedTokenToGo, nil +} + +func getPeerPoolDetails(ctx *clusterd.Context, args ...string) (cephclient.CephStoragePoolDetails, error) { + peerPoolDetails, err := ctx.Executor.ExecuteCommandWithTimeout(exec.CephCommandsTimeout, "ceph", args...) + if err != nil { + return cephclient.CephStoragePoolDetails{}, errors.Wrap(err, "failed to get pool details from peer cluster") + } + + return cephclient.ParsePoolDetails([]byte(peerPoolDetails)) +} + +func getMapKV(input map[string]string) (string, string) { + for k, v := range input { + return k, v + } + return "", "" +} diff --git a/pkg/operator/ceph/csi/peermap/config_test.go b/pkg/operator/ceph/csi/peermap/config_test.go new file mode 100644 index 000000000000..0b1d4fdb782c --- /dev/null +++ b/pkg/operator/ceph/csi/peermap/config_test.go @@ -0,0 +1,471 @@ +/* +Copyright 2021 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package peermap + +import ( + "context" + "os" + "reflect" + "testing" + "time" + + "strings" + + cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + "github.com/rook/rook/pkg/client/clientset/versioned/scheme" + "github.com/rook/rook/pkg/clusterd" + cephclient "github.com/rook/rook/pkg/daemon/ceph/client" + "github.com/rook/rook/pkg/operator/test" + exectest "github.com/rook/rook/pkg/util/exec/test" + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestAddClusterIDMapping(t *testing.T) { + clusterMap1 := map[string]string{"cluster-1": "cluster-2"} + m := &PeerIDMappings{} + m.addClusterIDMapping(clusterMap1) + assert.Equal(t, 1, len(*m)) + + // Add same cluster map again and assert that it didn't get added. + m.addClusterIDMapping(clusterMap1) + assert.Equal(t, 1, len(*m)) + + // Add new cluster map and assert that it got added. 
+ clusterMap2 := map[string]string{"cluster-1": "cluster-3"} + m.addClusterIDMapping(clusterMap2) + assert.Equal(t, 2, len(*m)) +} + +func TestUpdateClusterPoolIDMap(t *testing.T) { + m := &PeerIDMappings{} + + // Ensure only local:peer-1 mapping should be present + newMappings := PeerIDMapping{ + ClusterIDMapping: map[string]string{"local": "peer-1"}, + RBDPoolIDMapping: []map[string]string{{"1": "2"}}, + } + m.updateRBDPoolIDMapping(newMappings) + assert.Equal(t, len(*m), 1) + assert.Equal(t, (*m)[0].ClusterIDMapping["local"], "peer-1") + assert.Equal(t, len((*m)[0].RBDPoolIDMapping), 1) + assert.Equal(t, (*m)[0].RBDPoolIDMapping[0]["1"], "2") + + // Ensure RBD pool ID mappings get updated + newMappings = PeerIDMapping{ + ClusterIDMapping: map[string]string{"local": "peer-1"}, + RBDPoolIDMapping: []map[string]string{{"1": "3"}}, + } + m.updateRBDPoolIDMapping(newMappings) + assert.Equal(t, len(*m), 1) + assert.Equal(t, (*m)[0].ClusterIDMapping["local"], "peer-1") + assert.Equal(t, len((*m)[0].RBDPoolIDMapping), 1) + assert.Equal(t, (*m)[0].RBDPoolIDMapping[0]["1"], "3") + + // Ensure that new pool ID mappings got added + newMappings = PeerIDMapping{ + ClusterIDMapping: map[string]string{"local": "peer-1"}, + RBDPoolIDMapping: []map[string]string{{"2": "4"}}, + } + m.updateRBDPoolIDMapping(newMappings) + assert.Equal(t, len(*m), 1) + assert.Equal(t, (*m)[0].ClusterIDMapping["local"], "peer-1") + assert.Equal(t, len((*m)[0].RBDPoolIDMapping), 2) + assert.Equal(t, (*m)[0].RBDPoolIDMapping[0]["1"], "3") + assert.Equal(t, (*m)[0].RBDPoolIDMapping[1]["2"], "4") + + // Ensure that new pool ID mappings got added + newMappings = PeerIDMapping{ + ClusterIDMapping: map[string]string{"local": "peer-1"}, + RBDPoolIDMapping: []map[string]string{{"3": "5"}}, + } + m.updateRBDPoolIDMapping(newMappings) + assert.Equal(t, len(*m), 1) + assert.Equal(t, (*m)[0].ClusterIDMapping["local"], "peer-1") + assert.Equal(t, len((*m)[0].RBDPoolIDMapping), 3) + assert.Equal(t, (*m)[0].RBDPoolIDMapping[0]["1"], "3") + assert.Equal(t, (*m)[0].RBDPoolIDMapping[1]["2"], "4") + assert.Equal(t, (*m)[0].RBDPoolIDMapping[2]["3"], "5") + + // Ensure that new Cluster ID mappings got added + newMappings = PeerIDMapping{ + ClusterIDMapping: map[string]string{"local": "peer-2"}, + RBDPoolIDMapping: []map[string]string{{"1": "3"}}, + } + m.updateRBDPoolIDMapping(newMappings) + assert.Equal(t, len(*m), 2) + assert.Equal(t, (*m)[0].ClusterIDMapping["local"], "peer-1") + assert.Equal(t, len((*m)[0].RBDPoolIDMapping), 3) + assert.Equal(t, (*m)[0].RBDPoolIDMapping[0]["1"], "3") + assert.Equal(t, (*m)[0].RBDPoolIDMapping[1]["2"], "4") + assert.Equal(t, (*m)[0].RBDPoolIDMapping[2]["3"], "5") + + assert.Equal(t, (*m)[1].ClusterIDMapping["local"], "peer-2") + assert.Equal(t, len((*m)[1].RBDPoolIDMapping), 1) + assert.Equal(t, (*m)[0].RBDPoolIDMapping[0]["1"], "3") +} + +func TestAddPoolIDMapping(t *testing.T) { + clusterMap1 := map[string]string{"cluster-1": "cluster-2"} + m := &PeerIDMappings{} + m.addClusterIDMapping(clusterMap1) + assert.Equal(t, 1, len(*m)) + + // Add two Pool ID mapping + poolIDMap1 := map[string]string{"1": "2"} + poolIDMap2 := map[string]string{"2": "3"} + + m.addRBDPoolIDMapping(clusterMap1, poolIDMap1) + m.addRBDPoolIDMapping(clusterMap1, poolIDMap2) + + assert.Equal(t, 2, len((*m)[0].RBDPoolIDMapping)) + + // Add another cluster ID mapping + clusterMap2 := map[string]string{"cluster-1": "cluster-3"} + m.addClusterIDMapping(clusterMap2) + + // Add one Pool ID mapping + poolIDMap3 := map[string]string{"2": "4"} + 
m.addRBDPoolIDMapping(clusterMap2, poolIDMap3) + + // Assert total of two mappings are added + assert.Equal(t, 2, len(*m)) + + // Assert two pool ID mappings are available for first cluster mapping + assert.Equal(t, 2, len((*m)[0].RBDPoolIDMapping)) + + // Assert one pool ID mapping is available for second cluster mapping + assert.Equal(t, 1, len((*m)[1].RBDPoolIDMapping)) +} + +const ( + ns = "rook-ceph-primary" +) + +// #nosec G101 fake token for peer cluster "peer1" +var fakeTokenPeer1 = "eyJmc2lkIjoiOWY1MjgyZGItYjg5OS00NTk2LTgwOTgtMzIwYzFmYzM5NmYzIiwiY2xpZW50X2lkIjoicmJkLW1pcnJvci1wZWVyIiwia2V5IjoiQVFBUnczOWQwdkhvQmhBQVlMM1I4RmR5dHNJQU50bkFTZ0lOTVE9PSIsIm1vbl9ob3N0IjoiW3YyOjE5Mi4xNjguMS4zOjY4MjAsdjE6MTkyLjE2OC4xLjM6NjgyMV0iLCAibmFtZXNwYWNlIjogInBlZXIxIn0=" + +// #nosec G101 fake token for peer cluster "peer2" +var fakeTokenPeer2 = "eyJmc2lkIjoiOWY1MjgyZGItYjg5OS00NTk2LTgwOTgtMzIwYzFmYzM5NmYzIiwiY2xpZW50X2lkIjoicmJkLW1pcnJvci1wZWVyIiwia2V5IjoiQVFBUnczOWQwdkhvQmhBQVlMM1I4RmR5dHNJQU50bkFTZ0lOTVE9PSIsIm1vbl9ob3N0IjoiW3YyOjE5Mi4xNjguMS4zOjY4MjAsdjE6MTkyLjE2OC4xLjM6NjgyMV0iLCAibmFtZXNwYWNlIjogInBlZXIyIn0=" + +// #nosec G101 fake token for peer cluster "peer3" +var fakeTokenPeer3 = "eyJmc2lkIjoiOWY1MjgyZGItYjg5OS00NTk2LTgwOTgtMzIwYzFmYzM5NmYzIiwiY2xpZW50X2lkIjoicmJkLW1pcnJvci1wZWVyIiwia2V5IjoiQVFBUnczOWQwdkhvQmhBQVlMM1I4RmR5dHNJQU50bkFTZ0lOTVE9PSIsIm1vbl9ob3N0IjoiW3YyOjE5Mi4xNjguMS4zOjY4MjAsdjE6MTkyLjE2OC4xLjM6NjgyMV0iLCAibmFtZXNwYWNlIjogInBlZXIzIn0=" + +var peer1Secret = corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "peer1Secret", + Namespace: ns, + }, + Data: map[string][]byte{ + "token": []byte(fakeTokenPeer1), + }, +} + +var peer2Secret = corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "peer2Secret", + Namespace: ns, + }, + Data: map[string][]byte{ + "token": []byte(fakeTokenPeer2), + }, +} + +var peer3Secret = corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "peer3Secret", + Namespace: ns, + }, + Data: map[string][]byte{ + "token": []byte(fakeTokenPeer3), + }, +} + +var fakeSinglePeerCephBlockPool = cephv1.CephBlockPool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mirrorPool1", + Namespace: ns, + }, + Spec: cephv1.PoolSpec{ + Mirroring: cephv1.MirroringSpec{ + Peers: &cephv1.MirroringPeerSpec{ + SecretNames: []string{ + "peer1Secret", + }, + }, + }, + }, +} + +var fakeMultiPeerCephBlockPool = cephv1.CephBlockPool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mirrorPool1", + Namespace: ns, + }, + Spec: cephv1.PoolSpec{ + Mirroring: cephv1.MirroringSpec{ + Peers: &cephv1.MirroringPeerSpec{ + SecretNames: []string{ + "peer1Secret", + "peer2Secret", + "peer3Secret", + }, + }, + }, + }, +} + +var mockExecutor = &exectest.MockExecutor{ + MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { + logger.Infof("Command: %s %v", command, args) + // Fake pool details for "rook-ceph-primary" cluster + if args[0] == "osd" && args[1] == "pool" && args[2] == "get" && strings.HasSuffix(args[6], ns) { + if args[3] == "mirrorPool1" { + return `{"pool_id": 1}`, nil + } else if args[3] == "mirrorPool2" { + return `{"pool_id": 2}`, nil + } + + } + return "", nil + }, + MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, args ...string) (string, error) { + logger.Infof("Command: %s %v", command, args) + if args[0] == "osd" && args[1] == "pool" && args[2] == "get" && strings.HasSuffix(args[5], "peer1") { + if args[3] == "mirrorPool1" { + return `{"pool_id": 2}`, nil + } else if args[3] == "mirrorPool2" { + return `{"pool_id": 3}`, nil + } 
+ } + if args[0] == "osd" && args[1] == "pool" && args[2] == "get" && strings.HasSuffix(args[5], "peer2") { + if args[3] == "mirrorPool1" { + return `{"pool_id": 3}`, nil + } else if args[3] == "mirrorPool2" { + return `{"pool_id": 4}`, nil + } + } + if args[0] == "osd" && args[1] == "pool" && args[2] == "get" && strings.HasSuffix(args[5], "peer3") { + if args[3] == "mirrorPool1" { + return `{"pool_id": 4}`, nil + } else if args[3] == "mirrorPool2" { + return `{"pool_id": 5}`, nil + } + } + return "", nil + }, +} + +func TestSinglePeerMappings(t *testing.T) { + clusterInfo := &cephclient.ClusterInfo{Namespace: ns} + fakeContext := &clusterd.Context{ + Executor: mockExecutor, + Clientset: test.New(t, 3), + } + + // create fake secret with "peer1" cluster token + _, err := fakeContext.Clientset.CoreV1().Secrets(ns).Create(context.TODO(), &peer1Secret, metav1.CreateOptions{}) + assert.NoError(t, err) + + //expected: &[{ClusterIDMapping:{peer1:rook-ceph-primary}. RBDPoolIDMapping:[{2:1}]}] + actualMappings, err := getClusterPoolIDMap( + fakeContext, + clusterInfo, + &fakeSinglePeerCephBlockPool, + ) + assert.NoError(t, err) + mappings := *actualMappings + assert.Equal(t, 1, len(mappings)) + assert.Equal(t, ns, mappings[0].ClusterIDMapping["peer1"]) + assert.Equal(t, "1", mappings[0].RBDPoolIDMapping[0]["2"]) +} + +func TestMultiPeerMappings(t *testing.T) { + clusterInfo := &cephclient.ClusterInfo{Namespace: ns} + fakeContext := &clusterd.Context{ + Executor: mockExecutor, + Clientset: test.New(t, 3), + } + + // create fake secret with "peer1" cluster token + _, err := fakeContext.Clientset.CoreV1().Secrets(ns).Create(context.TODO(), &peer1Secret, metav1.CreateOptions{}) + assert.NoError(t, err) + + // create fake secret with "peer2" cluster token + _, err = fakeContext.Clientset.CoreV1().Secrets(ns).Create(context.TODO(), &peer2Secret, metav1.CreateOptions{}) + assert.NoError(t, err) + + // create fake secret with "peer3" cluster token + _, err = fakeContext.Clientset.CoreV1().Secrets(ns).Create(context.TODO(), &peer3Secret, metav1.CreateOptions{}) + assert.NoError(t, err) + + actualMappings, err := getClusterPoolIDMap( + fakeContext, + clusterInfo, + &fakeMultiPeerCephBlockPool, + ) + assert.NoError(t, err) + mappings := *actualMappings + /* Expected: + [ + {ClusterIDMapping:{peer1:rook-ceph-primary}, RBDPoolIDMapping:[{2:1}]} + {ClusterIDMapping:{peer2:rook-ceph-primary}, RBDPoolIDMapping:[{3:1}]} + {ClusterIDMapping:map{peer3:rook-ceph-primary} RBDPoolIDMapping:[{4:1}]} + ] + */ + + assert.Equal(t, 3, len(mappings)) + + assert.Equal(t, 1, len(mappings[0].ClusterIDMapping)) + assert.Equal(t, ns, mappings[0].ClusterIDMapping["peer1"]) + assert.Equal(t, "1", mappings[0].RBDPoolIDMapping[0]["2"]) + + assert.Equal(t, 1, len(mappings[1].ClusterIDMapping)) + assert.Equal(t, ns, mappings[1].ClusterIDMapping["peer2"]) + assert.Equal(t, "1", mappings[1].RBDPoolIDMapping[0]["3"]) + + assert.Equal(t, 1, len(mappings[2].ClusterIDMapping)) + assert.Equal(t, ns, mappings[2].ClusterIDMapping["peer3"]) + assert.Equal(t, "1", mappings[2].RBDPoolIDMapping[0]["4"]) +} + +func TestDecodePeerToken(t *testing.T) { + // Valid token + decodedToken, err := decodePeerToken(fakeTokenPeer1) + assert.NoError(t, err) + assert.Equal(t, "peer1", decodedToken.Namespace) + + // Invalid token + _, err = decodePeerToken("invalidToken") + assert.Error(t, err) +} + +func fakeOperatorPod() *corev1.Pod { + p := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: ns, + OwnerReferences: 
[]metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: "testReplicaSet", + }, + }, + }, + Spec: corev1.PodSpec{}, + } + return p +} + +func fakeReplicaSet() *appsv1.ReplicaSet { + r := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testReplicaSet", + Namespace: ns, + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Deployment", + }, + }, + }, + } + + return r +} + +func TestCreateOrUpdateConfig(t *testing.T) { + os.Setenv("POD_NAME", "test") + defer os.Setenv("POD_NAME", "") + os.Setenv("POD_NAMESPACE", ns) + defer os.Setenv("POD_NAMESPACE", "") + + scheme := scheme.Scheme + err := cephv1.AddToScheme(scheme) + assert.NoError(t, err) + + err = appsv1.AddToScheme(scheme) + assert.NoError(t, err) + + err = corev1.AddToScheme(scheme) + assert.NoError(t, err) + + fakeContext := &clusterd.Context{ + Client: fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects().Build(), + Executor: mockExecutor, + Clientset: test.New(t, 3), + } + + // Create fake pod + _, err = fakeContext.Clientset.CoreV1().Pods(ns).Create(context.TODO(), fakeOperatorPod(), metav1.CreateOptions{}) + assert.NoError(t, err) + + // Create fake replicaset + _, err = fakeContext.Clientset.AppsV1().ReplicaSets(ns).Create(context.TODO(), fakeReplicaSet(), metav1.CreateOptions{}) + assert.NoError(t, err) + + // Create empty ID mapping configMap + err = CreateOrUpdateConfig(fakeContext, &PeerIDMappings{}) + assert.NoError(t, err) + validateConfig(t, fakeContext, PeerIDMappings{}) + + // Create ID mapping configMap with data + actualMappings := &PeerIDMappings{ + { + ClusterIDMapping: map[string]string{"peer1": ns}, + RBDPoolIDMapping: []map[string]string{ + { + "2": "1", + }, + }, + }, + } + + err = CreateOrUpdateConfig(fakeContext, actualMappings) + assert.NoError(t, err) + //validateConfig(t, fakeContext, actualMappings) + + //Update existing mapping config + mappings := *actualMappings + mappings = append(mappings, PeerIDMapping{ + ClusterIDMapping: map[string]string{"peer2": ns}, + RBDPoolIDMapping: []map[string]string{ + { + "3": "1", + }, + }, + }) + + err = CreateOrUpdateConfig(fakeContext, &mappings) + assert.NoError(t, err) + validateConfig(t, fakeContext, mappings) +} + +func validateConfig(t *testing.T, c *clusterd.Context, mappings PeerIDMappings) { + cm := &corev1.ConfigMap{} + err := c.Client.Get(context.TODO(), types.NamespacedName{Name: mappingConfigName, Namespace: ns}, cm) + assert.NoError(t, err) + + data := cm.Data[mappingConfigkey] + expectedMappings, err := toObj(data) + + assert.NoError(t, err) + assert.True(t, reflect.DeepEqual(mappings, expectedMappings)) +} diff --git a/pkg/operator/ceph/csi/spec.go b/pkg/operator/ceph/csi/spec.go index 573aa9d386a0..ab74aa6789a5 100644 --- a/pkg/operator/ceph/csi/spec.go +++ b/pkg/operator/ceph/csi/spec.go @@ -64,7 +64,7 @@ type Param struct { CephFSLivenessMetricsPort uint16 RBDGRPCMetricsPort uint16 RBDLivenessMetricsPort uint16 - ProvisionerReplicas uint8 + ProvisionerReplicas int32 CSICephFSPodLabels map[string]string CSIRBDPodLabels map[string]string } @@ -108,12 +108,12 @@ var ( // manually challenging. 
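The fake peer tokens above are plain base64-encoded JSON blobs carrying the peer's fsid, client_id, key, mon_host, and namespace fields, which is what lets TestDecodePeerToken assert on the decoded namespace. A minimal, self-contained sketch of that encoding follows; the peerToken struct, its json tags, and the sample values are assumptions made for illustration and are not the actual Rook type behind decodePeerToken.

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// peerToken mirrors the fields visible in the fake tokens above; the type name and
// json tags are assumptions made for this sketch only.
type peerToken struct {
	FSID      string `json:"fsid"`
	ClientID  string `json:"client_id"`
	Key       string `json:"key"`
	MonHost   string `json:"mon_host"`
	Namespace string `json:"namespace"`
}

func main() {
	// Build a token the same way the fakes above are built: JSON, then base64.
	plain := `{"fsid":"0000","client_id":"rbd-mirror-peer","key":"secret","mon_host":"[v2:192.168.1.3:6820]","namespace":"peer1"}`
	token := base64.StdEncoding.EncodeToString([]byte(plain))

	// Decoding reverses the two steps, which is the behavior TestDecodePeerToken exercises.
	raw, err := base64.StdEncoding.DecodeString(token)
	if err != nil {
		panic(err)
	}
	var t peerToken
	if err := json.Unmarshal(raw, &t); err != nil {
		panic(err)
	}
	fmt.Println(t.Namespace) // prints "peer1"
}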
var ( // image names - DefaultCSIPluginImage = "quay.io/cephcsi/cephcsi:v3.3.1" - DefaultRegistrarImage = "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.2.0" - DefaultProvisionerImage = "k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2" - DefaultAttacherImage = "k8s.gcr.io/sig-storage/csi-attacher:v3.2.1" - DefaultSnapshotterImage = "k8s.gcr.io/sig-storage/csi-snapshotter:v4.1.1" - DefaultResizerImage = "k8s.gcr.io/sig-storage/csi-resizer:v1.2.0" + DefaultCSIPluginImage = "quay.io/cephcsi/cephcsi:v3.4.0" + DefaultRegistrarImage = "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0" + DefaultProvisionerImage = "k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0" + DefaultAttacherImage = "k8s.gcr.io/sig-storage/csi-attacher:v3.3.0" + DefaultSnapshotterImage = "k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0" + DefaultResizerImage = "k8s.gcr.io/sig-storage/csi-resizer:v1.3.0" DefaultVolumeReplicationImage = "quay.io/csiaddons/volumereplication-operator:v0.1.0" ) @@ -173,6 +173,9 @@ const ( // default log level for csi containers defaultLogLevel uint8 = 0 + // default provisioner replicas + defaultProvisionerReplicas int32 = 2 + // update strategy rollingUpdate = "RollingUpdate" onDelete = "OnDelete" @@ -313,13 +316,6 @@ func startDrivers(clientset kubernetes.Interface, rookclientset rookclient.Inter return errors.Wrap(err, "failed to load CSI_PROVISIONER_PRIORITY_CLASSNAME setting") } - // OMAP generator will be enabled by default - // If AllowUnsupported is set to false and if CSI version is less than - // <3.2.0 disable OMAP generator sidecar - if !v.SupportsOMAPController() { - tp.EnableOMAPGenerator = false - } - enableOMAPGenerator, err := k8sutil.GetOperatorSetting(clientset, controllerutil.OperatorSettingConfigMapName, "CSI_ENABLE_OMAP_GENERATOR", "false") if err != nil { return errors.Wrap(err, "failed to load CSI_ENABLE_OMAP_GENERATOR setting") @@ -409,14 +405,26 @@ func startDrivers(clientset kubernetes.Interface, rookclientset rookclient.Inter } } - tp.ProvisionerReplicas = 2 + tp.ProvisionerReplicas = defaultProvisionerReplicas nodes, err := clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) if err == nil { if len(nodes.Items) == 1 { tp.ProvisionerReplicas = 1 + } else { + replicas, err := k8sutil.GetOperatorSetting(clientset, controllerutil.OperatorSettingConfigMapName, "CSI_PROVISIONER_REPLICAS", "2") + if err != nil { + logger.Warningf("failed to load CSI_PROVISIONER_REPLICAS. Defaulting to %d. %v", tp.ProvisionerReplicas, err) + } else { + r, err := strconv.ParseInt(replicas, 10, 32) + if err != nil { + logger.Errorf("failed to parse CSI_PROVISIONER_REPLICAS. Defaulting to %d. %v", tp.ProvisionerReplicas, err) + } else { + tp.ProvisionerReplicas = int32(r) + } + } } } else { - logger.Errorf("failed to get nodes. Defaulting the number of replicas of provisioner pods to 2. %v", err) + logger.Errorf("failed to get nodes. Defaulting the number of replicas of provisioner pods to %d. 
%v", tp.ProvisionerReplicas, err) } if EnableRBD { @@ -435,7 +443,6 @@ func startDrivers(clientset kubernetes.Interface, rookclientset rookclient.Inter return errors.Wrap(err, "failed to load rbd plugin service template") } rbdService.Namespace = namespace - logger.Info("successfully started CSI Ceph RBD") } if EnableCephFS { cephfsPlugin, err = templateToDaemonSet("cephfsplugin", CephFSPluginTemplatePath, tp) @@ -453,7 +460,6 @@ func startDrivers(clientset kubernetes.Interface, rookclientset rookclient.Inter return errors.Wrap(err, "failed to load cephfs plugin service template") } cephfsService.Namespace = namespace - logger.Info("successfully started CSI CephFS driver") } // get common provisioner tolerations and node affinity @@ -516,6 +522,7 @@ func startDrivers(clientset kubernetes.Interface, rookclientset rookclient.Inter return errors.Wrapf(err, "failed to start rbd provisioner deployment: %+v", rbdProvisionerDeployment) } k8sutil.AddRookVersionLabelToDeployment(rbdProvisionerDeployment) + logger.Info("successfully started CSI Ceph RBD driver") } if rbdService != nil { @@ -584,6 +591,7 @@ func startDrivers(clientset kubernetes.Interface, rookclientset rookclient.Inter return errors.Wrapf(err, "failed to start cephfs provisioner deployment: %+v", cephfsProvisionerDeployment) } k8sutil.AddRookVersionLabelToDeployment(cephfsProvisionerDeployment) + logger.Info("successfully started CSI CephFS driver") } if cephfsService != nil { err = ownerInfo.SetControllerReference(cephfsService) @@ -612,7 +620,7 @@ func startDrivers(clientset kubernetes.Interface, rookclientset rookclient.Inter if err != nil { // logging a warning and intentionally continuing with the default // log level - logger.Warningf("failed to parse CSI_CEPHFS_FSGROUPPOLICY. Defaulting to %q. %v", k8scsi.ReadWriteOnceWithFSTypeFSGroupPolicy, err) + logger.Warningf("failed to parse CSI_CEPHFS_FSGROUPPOLICY. Defaulting to %q. 
%v", k8scsi.NoneFSGroupPolicy, err) } err = csiDriverobj.createCSIDriverInfo(ctx, clientset, CephFSDriverName, fsGroupPolicyForCephFS) if err != nil { @@ -718,8 +726,11 @@ func validateCSIVersion(clientset kubernetes.Interface, namespace, rookImage, se job := versionReporter.Job() job.Spec.Template.Spec.ServiceAccountName = serviceAccountName - // Apply csi provisioner toleration for csi version check job + // Apply csi provisioner toleration and affinity for csi version check job job.Spec.Template.Spec.Tolerations = getToleration(clientset, provisionerTolerationsEnv, []corev1.Toleration{}) + job.Spec.Template.Spec.Affinity = &corev1.Affinity{ + NodeAffinity: getNodeAffinity(clientset, provisionerNodeAffinityEnv, &corev1.NodeAffinity{}), + } stdout, _, retcode, err := versionReporter.Run(timeout) if err != nil { return nil, errors.Wrap(err, "failed to complete ceph CSI version job") diff --git a/pkg/operator/ceph/csi/version.go b/pkg/operator/ceph/csi/version.go index 8f978b02c911..bcc8166b3a41 100644 --- a/pkg/operator/ceph/csi/version.go +++ b/pkg/operator/ceph/csi/version.go @@ -25,17 +25,16 @@ import ( ) var ( - //minimum supported version is 2.0.0 - minimum = CephCSIVersion{2, 0, 0} + //minimum supported version is 3.3.0 + minimum = CephCSIVersion{3, 3, 0} //supportedCSIVersions are versions that rook supports - releaseV210 = CephCSIVersion{2, 1, 0} - releasev300 = CephCSIVersion{3, 0, 0} - releasev310 = CephCSIVersion{3, 1, 0} - releasev320 = CephCSIVersion{3, 2, 0} releasev330 = CephCSIVersion{3, 3, 0} - supportedCSIVersions = []CephCSIVersion{minimum, releaseV210, releasev300, releasev310, releasev320, releasev330} - // omap generator is supported in v3.2.0+ - omapSupportedVersions = releasev320 + releasev340 = CephCSIVersion{3, 4, 0} + supportedCSIVersions = []CephCSIVersion{ + minimum, + releasev330, + releasev340, + } // for parsing the output of `cephcsi` versionCSIPattern = regexp.MustCompile(`v(\d+)\.(\d+)\.(\d+)`) ) @@ -52,33 +51,6 @@ func (v *CephCSIVersion) String() string { v.Major, v.Minor, v.Bugfix) } -// SupportsOMAPController checks if the detected version supports OMAP generator -func (v *CephCSIVersion) SupportsOMAPController() bool { - - // if AllowUnsupported is set also a csi-image greater than the supported ones are allowed - if AllowUnsupported { - return true - } - - if !v.isAtLeast(&minimum) { - return false - } - - if v.Major > omapSupportedVersions.Major { - return true - } - if v.Major == omapSupportedVersions.Major { - if v.Minor > omapSupportedVersions.Minor { - return true - } - if v.Minor == omapSupportedVersions.Minor { - return v.Bugfix >= omapSupportedVersions.Bugfix - } - } - - return false -} - // Supported checks if the detected version is part of the known supported CSI versions func (v *CephCSIVersion) Supported() bool { if !v.isAtLeast(&minimum) { diff --git a/pkg/operator/ceph/csi/version_test.go b/pkg/operator/ceph/csi/version_test.go index fb7d05b994ed..c69ae0c4ce5f 100644 --- a/pkg/operator/ceph/csi/version_test.go +++ b/pkg/operator/ceph/csi/version_test.go @@ -23,12 +23,9 @@ import ( ) var ( - testMinVersion = CephCSIVersion{2, 0, 0} - testReleaseV210 = CephCSIVersion{2, 1, 0} - testReleaseV300 = CephCSIVersion{3, 0, 0} - testReleaseV320 = CephCSIVersion{3, 2, 0} - testReleaseV321 = CephCSIVersion{3, 2, 1} + testMinVersion = CephCSIVersion{3, 3, 0} testReleaseV330 = CephCSIVersion{3, 3, 0} + testReleaseV340 = CephCSIVersion{3, 4, 0} testVersionUnsupported = CephCSIVersion{4, 0, 0} ) @@ -42,55 +39,15 @@ func TestIsAtLeast(t *testing.T) { 
ret = testMinVersion.isAtLeast(&testMinVersion) assert.Equal(t, true, ret) - // Test version which is greater (minor) - version = CephCSIVersion{2, 1, 0} - ret = testMinVersion.isAtLeast(&version) - assert.Equal(t, false, ret) - - // Test version which is greater (bugfix) - version = CephCSIVersion{2, 2, 0} - ret = testMinVersion.isAtLeast(&version) - assert.Equal(t, false, ret) - - // Test for v2.1.0 - // Test version which is greater (bugfix) - version = CephCSIVersion{2, 0, 1} - ret = testReleaseV210.isAtLeast(&version) - assert.Equal(t, true, ret) - - // Test version which is equal - ret = testReleaseV210.isAtLeast(&testReleaseV210) - assert.Equal(t, true, ret) - - // Test version which is greater (minor) - version = CephCSIVersion{2, 1, 1} - ret = testReleaseV210.isAtLeast(&version) - assert.Equal(t, false, ret) - - // Test version which is greater (bugfix) - version = CephCSIVersion{2, 2, 0} - ret = testReleaseV210.isAtLeast(&version) - assert.Equal(t, false, ret) - - // Test for 3.0.0 // Test version which is equal - ret = testReleaseV300.isAtLeast(&testReleaseV300) + ret = testReleaseV330.isAtLeast(&testReleaseV330) assert.Equal(t, true, ret) - // Test for 3.3.0 + // Test for 3.4.0 // Test version which is lesser - ret = testReleaseV330.isAtLeast(&testReleaseV300) + ret = testReleaseV340.isAtLeast(&testReleaseV330) assert.Equal(t, true, ret) - // Test version which is greater (minor) - version = CephCSIVersion{3, 1, 1} - ret = testReleaseV300.isAtLeast(&version) - assert.Equal(t, false, ret) - - // Test version which is greater (bugfix) - version = CephCSIVersion{3, 2, 0} - ret = testReleaseV300.isAtLeast(&version) - assert.Equal(t, false, ret) } func TestSupported(t *testing.T) { @@ -98,34 +55,16 @@ func TestSupported(t *testing.T) { ret := testMinVersion.Supported() assert.Equal(t, true, ret) - ret = testMinVersion.Supported() - assert.Equal(t, true, ret) - ret = testVersionUnsupported.Supported() assert.Equal(t, false, ret) -} - -func TestSupportOMAPController(t *testing.T) { - AllowUnsupported = true - ret := testMinVersion.SupportsOMAPController() - assert.True(t, ret) - AllowUnsupported = false - ret = testMinVersion.SupportsOMAPController() - assert.False(t, ret) - - ret = testReleaseV300.SupportsOMAPController() - assert.False(t, ret) - - ret = testReleaseV320.SupportsOMAPController() - assert.True(t, ret) - - ret = testReleaseV321.SupportsOMAPController() - assert.True(t, ret) + ret = testReleaseV330.Supported() + assert.Equal(t, true, ret) - ret = testReleaseV330.SupportsOMAPController() - assert.True(t, ret) + ret = testReleaseV340.Supported() + assert.Equal(t, true, ret) } + func Test_extractCephCSIVersion(t *testing.T) { expectedVersion := CephCSIVersion{3, 0, 0} csiString := []byte(`Cephcsi Version: v3.0.0 diff --git a/pkg/operator/ceph/disruption/clusterdisruption/osd.go b/pkg/operator/ceph/disruption/clusterdisruption/osd.go index 85792ca4fab4..5635913062df 100644 --- a/pkg/operator/ceph/disruption/clusterdisruption/osd.go +++ b/pkg/operator/ceph/disruption/clusterdisruption/osd.go @@ -407,6 +407,21 @@ func (r *ReconcileClusterDisruption) reconcilePDBsForOSDs( return reconcile.Result{Requeue: true, RequeueAfter: 30 * time.Second}, nil } + // requeue if allowed disruptions in the default PDB is 0 + allowedDisruptions, err := r.getAllowedDisruptions(osdPDBAppName, request.Namespace) + if err != nil { + if apierrors.IsNotFound(err) { + logger.Debugf("default osd pdb %q not found. 
Skipping reconcile", osdPDBAppName) + return reconcile.Result{}, nil + } + return reconcile.Result{}, errors.Wrapf(err, "failed to get allowed disruptions count from default osd pdb %q.", osdPDBAppName) + } + + if allowedDisruptions == 0 { + logger.Info("reconciling osd pdb reconciler as the allowed disruptions in default pdb is 0") + return reconcile.Result{Requeue: true, RequeueAfter: 30 * time.Second}, nil + } + return reconcile.Result{}, nil } @@ -640,6 +655,30 @@ func getLastDrainTimeStamp(pdbStateMap *corev1.ConfigMap, key string) (time.Time return lastDrainTimeStamp, nil } +func (r *ReconcileClusterDisruption) getAllowedDisruptions(pdbName, namespace string) (int32, error) { + usePDBV1Beta1, err := k8sutil.UsePDBV1Beta1Version(r.context.ClusterdContext.Clientset) + if err != nil { + return -1, errors.Wrap(err, "failed to fetch pdb version") + } + if usePDBV1Beta1 { + pdb := &policyv1beta1.PodDisruptionBudget{} + err = r.client.Get(context.TODO(), types.NamespacedName{Name: pdbName, Namespace: namespace}, pdb) + if err != nil { + return -1, err + } + + return pdb.Status.DisruptionsAllowed, nil + } + + pdb := &policyv1.PodDisruptionBudget{} + err = r.client.Get(context.TODO(), types.NamespacedName{Name: pdbName, Namespace: namespace}, pdb) + if err != nil { + return -1, err + } + + return pdb.Status.DisruptionsAllowed, nil +} + func resetPDBConfig(pdbStateMap *corev1.ConfigMap) { pdbStateMap.Data[drainingFailureDomainKey] = "" delete(pdbStateMap.Data, drainingFailureDomainDurationKey) diff --git a/pkg/operator/ceph/disruption/clusterdisruption/osd_test.go b/pkg/operator/ceph/disruption/clusterdisruption/osd_test.go index 7261dde11445..6c5a9c1825d4 100644 --- a/pkg/operator/ceph/disruption/clusterdisruption/osd_test.go +++ b/pkg/operator/ceph/disruption/clusterdisruption/osd_test.go @@ -464,3 +464,31 @@ func TestHasNodeDrained(t *testing.T) { assert.NoError(t, err) assert.True(t, expected) } + +func TestGetAllowedDisruptions(t *testing.T) { + r := getFakeReconciler(t) + clientset := test.New(t, 3) + test.SetFakeKubernetesVersion(clientset, "v1.21.0") + r.context = &controllerconfig.Context{ClusterdContext: &clusterd.Context{Clientset: clientset}} + + // Default PDB is not available + allowedDisruptions, err := r.getAllowedDisruptions(osdPDBAppName, namespace) + assert.Error(t, err) + assert.Equal(t, int32(-1), allowedDisruptions) + + // Default PDB is available + pdb := &policyv1.PodDisruptionBudget{ + ObjectMeta: metav1.ObjectMeta{ + Name: osdPDBAppName, + Namespace: namespace, + }, + Status: policyv1.PodDisruptionBudgetStatus{ + DisruptionsAllowed: int32(0), + }, + } + err = r.client.Create(context.TODO(), pdb) + assert.NoError(t, err) + allowedDisruptions, err = r.getAllowedDisruptions(osdPDBAppName, namespace) + assert.NoError(t, err) + assert.Equal(t, int32(0), allowedDisruptions) +} diff --git a/pkg/operator/ceph/file/controller.go b/pkg/operator/ceph/file/controller.go index 406de8527456..c86211943b21 100644 --- a/pkg/operator/ceph/file/controller.go +++ b/pkg/operator/ceph/file/controller.go @@ -31,6 +31,7 @@ import ( "github.com/rook/rook/pkg/operator/ceph/cluster/mon" opconfig "github.com/rook/rook/pkg/operator/ceph/config" opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" + "github.com/rook/rook/pkg/operator/ceph/file/mirror" "github.com/rook/rook/pkg/operator/k8sutil" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -194,7 +195,7 @@ func (r *ReconcileCephFilesystem) reconcile(request reconcile.Request) (reconcil } // Make sure a CephCluster is 
present otherwise do nothing - cephCluster, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName) + cephCluster, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, request.NamespacedName, controllerName) if !isReadyToReconcile { // This handles the case where the Ceph Cluster is gone and we want to delete that CR // We skip the deleteFilesystem() function since everything is gone already @@ -288,10 +289,10 @@ func (r *ReconcileCephFilesystem) reconcile(request reconcile.Request) (reconcil return reconcileResponse, err } + statusUpdated := false + // Enable mirroring if needed - // TODO: change me to 16.2.5 once it's out, in the mean this allows us to run the CI and validate the code - // if r.clusterInfo.CephVersion.IsAtLeast(mirror.PeerAdditionMinVersion) { - if r.clusterInfo.CephVersion.IsAtLeastPacific() { + if r.clusterInfo.CephVersion.IsAtLeast(mirror.PeerAdditionMinVersion) { // Disable mirroring on that filesystem if needed if cephFilesystem.Spec.Mirroring != nil { if !cephFilesystem.Spec.Mirroring.Enabled { @@ -308,7 +309,7 @@ func (r *ReconcileCephFilesystem) reconcile(request reconcile.Request) (reconcil // Always create a bootstrap peer token in case another cluster wants to add us as a peer logger.Info("reconciling create cephfs-mirror peer configuration") - reconcileResponse, err = opcontroller.CreateBootstrapPeerSecret(r.context, r.clusterInfo, cephFilesystem, request.NamespacedName, r.scheme) + reconcileResponse, err = opcontroller.CreateBootstrapPeerSecret(r.context, r.clusterInfo, cephFilesystem, k8sutil.NewOwnerInfo(cephFilesystem, r.scheme)) if err != nil { updateStatus(r.client, request.NamespacedName, cephv1.ConditionFailure, nil) return reconcileResponse, errors.Wrapf(err, "failed to create cephfs-mirror bootstrap peer for filesystem %q.", cephFilesystem.Name) @@ -322,6 +323,7 @@ func (r *ReconcileCephFilesystem) reconcile(request reconcile.Request) (reconcil // Set Ready status, we are done reconciling updateStatus(r.client, request.NamespacedName, cephv1.ConditionReady, opcontroller.GenerateStatusInfo(cephFilesystem)) + statusUpdated = true // Run go routine check for mirroring status if !cephFilesystem.Spec.StatusCheck.Mirror.Disabled { @@ -336,7 +338,8 @@ func (r *ReconcileCephFilesystem) reconcile(request reconcile.Request) (reconcil } } } - } else { + } + if !statusUpdated { // Set Ready status, we are done reconciling updateStatus(r.client, request.NamespacedName, cephv1.ConditionReady, nil) } diff --git a/pkg/operator/ceph/file/filesystem.go b/pkg/operator/ceph/file/filesystem.go index c6cba903b17e..2430c9b08f32 100644 --- a/pkg/operator/ceph/file/filesystem.go +++ b/pkg/operator/ceph/file/filesystem.go @@ -18,10 +18,8 @@ package file import ( "fmt" - "syscall" "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/util/exec" "github.com/pkg/errors" cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" @@ -51,37 +49,31 @@ func createFilesystem( ownerInfo *k8sutil.OwnerInfo, dataDirHostPath string, ) error { + logger.Infof("start running mdses for filesystem %q", fs.Name) + c := mds.NewCluster(clusterInfo, context, clusterSpec, fs, ownerInfo, dataDirHostPath) + if err := c.Start(); err != nil { + return err + } + if len(fs.Spec.DataPools) != 0 { f := newFS(fs.Name, fs.Namespace) if err := f.doFilesystemCreate(context, clusterInfo, clusterSpec, fs.Spec); err != nil { return errors.Wrapf(err, 
"failed to create filesystem %q", fs.Name) } } - - filesystem, err := cephclient.GetFilesystem(context, clusterInfo, fs.Name) - if err != nil { - return errors.Wrapf(err, "failed to get filesystem %q", fs.Name) - } - if fs.Spec.MetadataServer.ActiveStandby { - if err = cephclient.AllowStandbyReplay(context, clusterInfo, fs.Name, fs.Spec.MetadataServer.ActiveStandby); err != nil { + if err := cephclient.AllowStandbyReplay(context, clusterInfo, fs.Name, fs.Spec.MetadataServer.ActiveStandby); err != nil { return errors.Wrapf(err, "failed to set allow_standby_replay to filesystem %q", fs.Name) } } // set the number of active mds instances if fs.Spec.MetadataServer.ActiveCount > 1 { - if err = cephclient.SetNumMDSRanks(context, clusterInfo, fs.Name, fs.Spec.MetadataServer.ActiveCount); err != nil { + if err := cephclient.SetNumMDSRanks(context, clusterInfo, fs.Name, fs.Spec.MetadataServer.ActiveCount); err != nil { logger.Warningf("failed setting active mds count to %d. %v", fs.Spec.MetadataServer.ActiveCount, err) } } - logger.Infof("start running mdses for filesystem %q", fs.Name) - c := mds.NewCluster(clusterInfo, context, clusterSpec, fs, filesystem, ownerInfo, dataDirHostPath) - if err := c.Start(); err != nil { - return err - } - return nil } @@ -94,15 +86,7 @@ func deleteFilesystem( ownerInfo *k8sutil.OwnerInfo, dataDirHostPath string, ) error { - filesystem, err := cephclient.GetFilesystem(context, clusterInfo, fs.Name) - if err != nil { - if code, ok := exec.ExitStatus(err); ok && code == int(syscall.ENOENT) { - // If we're deleting the filesystem anyway, ignore the error that the filesystem doesn't exist - return nil - } - return errors.Wrapf(err, "failed to get filesystem %q", fs.Name) - } - c := mds.NewCluster(clusterInfo, context, clusterSpec, fs, filesystem, ownerInfo, dataDirHostPath) + c := mds.NewCluster(clusterInfo, context, clusterSpec, fs, ownerInfo, dataDirHostPath) // Delete mds CephX keys and configuration in centralized mon database replicas := fs.Spec.MetadataServer.ActiveCount * 2 @@ -110,7 +94,7 @@ func deleteFilesystem( daemonLetterID := k8sutil.IndexToName(i) daemonName := fmt.Sprintf("%s-%s", fs.Name, daemonLetterID) - err = c.DeleteMdsCephObjects(daemonName) + err := c.DeleteMdsCephObjects(daemonName) if err != nil { return errors.Wrapf(err, "failed to delete mds ceph objects for filesystem %q", fs.Name) } diff --git a/pkg/operator/ceph/file/filesystem_test.go b/pkg/operator/ceph/file/filesystem_test.go index e5300459516d..9e6435dee0ba 100644 --- a/pkg/operator/ceph/file/filesystem_test.go +++ b/pkg/operator/ceph/file/filesystem_test.go @@ -95,7 +95,7 @@ func isBasePoolOperation(fsName, command string, args []string) bool { return false } -func fsExecutor(t *testing.T, fsName, configDir string, multiFS bool) *exectest.MockExecutor { +func fsExecutor(t *testing.T, fsName, configDir string, multiFS bool, createDataOnePoolCount, addDataOnePoolCount *int) *exectest.MockExecutor { mdsmap := cephclient.CephFilesystemDetails{ ID: 0, MDSMap: cephclient.MDSMap{ @@ -160,6 +160,16 @@ func fsExecutor(t *testing.T, fsName, configDir string, multiFS bool) *exectest. 
return "", nil } else if contains(args, "flag") && contains(args, "enable_multiple") { return "", nil + } else if reflect.DeepEqual(args[0:5], []string{"osd", "crush", "rule", "create-replicated", fsName + "-data1"}) { + return "", nil + } else if reflect.DeepEqual(args[0:4], []string{"osd", "pool", "create", fsName + "-data1"}) { + *createDataOnePoolCount++ + return "", nil + } else if reflect.DeepEqual(args[0:6], []string{"osd", "pool", "set", fsName + "-data1", "size", "1"}) { + return "", nil + } else if reflect.DeepEqual(args[0:4], []string{"fs", "add_data_pool", fsName, fsName + "-data1"}) { + *addDataOnePoolCount++ + return "", nil } else if contains(args, "versions") { versionStr, _ := json.Marshal( map[string]map[string]int{ @@ -213,6 +223,16 @@ func fsExecutor(t *testing.T, fsName, configDir string, multiFS bool) *exectest. return "", nil } else if contains(args, "config") && contains(args, "get") { return "{}", nil + } else if reflect.DeepEqual(args[0:5], []string{"osd", "crush", "rule", "create-replicated", fsName + "-data1"}) { + return "", nil + } else if reflect.DeepEqual(args[0:4], []string{"osd", "pool", "create", fsName + "-data1"}) { + *createDataOnePoolCount++ + return "", nil + } else if reflect.DeepEqual(args[0:6], []string{"osd", "pool", "set", fsName + "-data1", "size", "1"}) { + return "", nil + } else if reflect.DeepEqual(args[0:4], []string{"fs", "add_data_pool", fsName, fsName + "-data1"}) { + *addDataOnePoolCount++ + return "", nil } else if contains(args, "versions") { versionStr, _ := json.Marshal( map[string]map[string]int{ @@ -257,9 +277,10 @@ func TestCreateFilesystem(t *testing.T) { var deploymentsUpdated *[]*apps.Deployment mds.UpdateDeploymentAndWait, deploymentsUpdated = testopk8s.UpdateDeploymentAndWaitStub() configDir, _ := ioutil.TempDir("", "") - fsName := "myfs" - executor := fsExecutor(t, fsName, configDir, false) + addDataOnePoolCount := 0 + createDataOnePoolCount := 0 + executor := fsExecutor(t, fsName, configDir, false, &createDataOnePoolCount, &addDataOnePoolCount) defer os.RemoveAll(configDir) clientset := testop.New(t, 1) context := &clusterd.Context{ @@ -271,114 +292,57 @@ func TestCreateFilesystem(t *testing.T) { // start a basic cluster ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() - err := createFilesystem(context, clusterInfo, fs, &cephv1.ClusterSpec{}, ownerInfo, "/var/lib/rook/") - assert.Nil(t, err) - validateStart(ctx, t, context, fs) - assert.ElementsMatch(t, []string{}, testopk8s.DeploymentNamesUpdated(deploymentsUpdated)) - testopk8s.ClearDeploymentsUpdated(deploymentsUpdated) - - // starting again should be a no-op - err = createFilesystem(context, clusterInfo, fs, &cephv1.ClusterSpec{}, ownerInfo, "/var/lib/rook/") - assert.Nil(t, err) - validateStart(ctx, t, context, fs) - assert.ElementsMatch(t, []string{fmt.Sprintf("rook-ceph-mds-%s-a", fsName), fmt.Sprintf("rook-ceph-mds-%s-b", fsName)}, testopk8s.DeploymentNamesUpdated(deploymentsUpdated)) - testopk8s.ClearDeploymentsUpdated(deploymentsUpdated) - // Increasing the number of data pools should be successful. 
- createDataOnePoolCount := 0 - addDataOnePoolCount := 0 - createdFsResponse := fmt.Sprintf(`{"fs_name": "%s", "metadata_pool": 2, "data_pools":[3]}`, fsName) - executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if contains(args, "fs") && contains(args, "get") { - return createdFsResponse, nil - } else if isBasePoolOperation(fsName, command, args) { - return "", nil - } else if reflect.DeepEqual(args[0:4], []string{"osd", "pool", "create", fsName + "-data1"}) { - createDataOnePoolCount++ - return "", nil - } else if reflect.DeepEqual(args[0:4], []string{"fs", "add_data_pool", fsName, fsName + "-data1"}) { - addDataOnePoolCount++ - return "", nil - } else if contains(args, "set") && contains(args, "max_mds") { - return "", nil - } else if contains(args, "auth") && contains(args, "get-or-create-key") { - return "{\"key\":\"mysecurekey\"}", nil - } else if reflect.DeepEqual(args[0:5], []string{"osd", "crush", "rule", "create-replicated", fsName + "-data1"}) { - return "", nil - } else if reflect.DeepEqual(args[0:6], []string{"osd", "pool", "set", fsName + "-data1", "size", "1"}) { - return "", nil - } else if args[0] == "config" && args[1] == "set" { - return "", nil - } else if contains(args, "versions") { - versionStr, _ := json.Marshal( - map[string]map[string]int{ - "mds": { - "ceph version 16.0.0-4-g2f728b9 (2f728b952cf293dd7f809ad8a0f5b5d040c43010) pacific (stable)": 2, - }, - }) - return string(versionStr), nil - } - assert.Fail(t, fmt.Sprintf("Unexpected command: %v", args)) - return "", nil - }, - } - context = &clusterd.Context{ - Executor: executor, - ConfigDir: configDir, - Clientset: clientset} - fs.Spec.DataPools = append(fs.Spec.DataPools, cephv1.PoolSpec{Replicated: cephv1.ReplicatedSpec{Size: 1, RequireSafeReplicaSize: false}}) - - err = createFilesystem(context, clusterInfo, fs, &cephv1.ClusterSpec{}, ownerInfo, "/var/lib/rook/") - assert.Nil(t, err) - validateStart(ctx, t, context, fs) - assert.ElementsMatch(t, []string{fmt.Sprintf("rook-ceph-mds-%s-a", fsName), fmt.Sprintf("rook-ceph-mds-%s-b", fsName)}, testopk8s.DeploymentNamesUpdated(deploymentsUpdated)) - assert.Equal(t, 1, createDataOnePoolCount) - assert.Equal(t, 1, addDataOnePoolCount) - testopk8s.ClearDeploymentsUpdated(deploymentsUpdated) - - // Test multiple filesystem creation - // Output to check multiple filesystem creation - fses := `[{"name":"myfs","metadata_pool":"myfs-metadata","metadata_pool_id":4,"data_pool_ids":[5],"data_pools":["myfs-data0"]},{"name":"myfs2","metadata_pool":"myfs2-metadata","metadata_pool_id":6,"data_pool_ids":[7],"data_pools":["myfs2-data0"]},{"name":"leseb","metadata_pool":"cephfs.leseb.meta","metadata_pool_id":8,"data_pool_ids":[9],"data_pools":["cephfs.leseb.data"]}]` - executorMultiFS := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if contains(args, "ls") { - return fses, nil - } else if contains(args, "versions") { - versionStr, _ := json.Marshal( - map[string]map[string]int{ - "mds": { - "ceph version 16.0.0-4-g2f728b9 (2f728b952cf293dd7f809ad8a0f5b5d040c43010) pacific (stable)": 2, - }, - }) - return string(versionStr), nil - } - return "{\"key\":\"mysecurekey\"}", errors.New("multiple fs") - }, - } - context = &clusterd.Context{ - Executor: executorMultiFS, - ConfigDir: configDir, - Clientset: clientset, - } - - // Create another filesystem which should fail - err = createFilesystem(context, clusterInfo, fs, &cephv1.ClusterSpec{}, 
&k8sutil.OwnerInfo{}, "/var/lib/rook/") - assert.Error(t, err) - assert.Equal(t, fmt.Sprintf("failed to create filesystem %q: multiple filesystems are only supported as of ceph pacific", fsName), err.Error()) + t.Run("start basic filesystem", func(t *testing.T) { + // start a basic cluster + err := createFilesystem(context, clusterInfo, fs, &cephv1.ClusterSpec{}, ownerInfo, "/var/lib/rook/") + assert.Nil(t, err) + validateStart(ctx, t, context, fs) + assert.ElementsMatch(t, []string{}, testopk8s.DeploymentNamesUpdated(deploymentsUpdated)) + testopk8s.ClearDeploymentsUpdated(deploymentsUpdated) + }) + + t.Run("start again should no-op", func(t *testing.T) { + err := createFilesystem(context, clusterInfo, fs, &cephv1.ClusterSpec{}, ownerInfo, "/var/lib/rook/") + assert.Nil(t, err) + validateStart(ctx, t, context, fs) + assert.ElementsMatch(t, []string{fmt.Sprintf("rook-ceph-mds-%s-a", fsName), fmt.Sprintf("rook-ceph-mds-%s-b", fsName)}, testopk8s.DeploymentNamesUpdated(deploymentsUpdated)) + testopk8s.ClearDeploymentsUpdated(deploymentsUpdated) + }) + + t.Run("increasing the number of data pools should be successful.", func(t *testing.T) { + context = &clusterd.Context{ + Executor: executor, + ConfigDir: configDir, + Clientset: clientset} + fs.Spec.DataPools = append(fs.Spec.DataPools, cephv1.PoolSpec{Replicated: cephv1.ReplicatedSpec{Size: 1, RequireSafeReplicaSize: false}}) + err := createFilesystem(context, clusterInfo, fs, &cephv1.ClusterSpec{}, ownerInfo, "/var/lib/rook/") + assert.Nil(t, err) + validateStart(ctx, t, context, fs) + assert.ElementsMatch(t, []string{fmt.Sprintf("rook-ceph-mds-%s-a", fsName), fmt.Sprintf("rook-ceph-mds-%s-b", fsName)}, testopk8s.DeploymentNamesUpdated(deploymentsUpdated)) + assert.Equal(t, 1, createDataOnePoolCount) + assert.Equal(t, 1, addDataOnePoolCount) + testopk8s.ClearDeploymentsUpdated(deploymentsUpdated) + }) + + t.Run("multiple filesystem creation", func(t *testing.T) { + context = &clusterd.Context{ + Executor: fsExecutor(t, fsName, configDir, true, &createDataOnePoolCount, &addDataOnePoolCount), + ConfigDir: configDir, + Clientset: clientset, + } - // It works since the Ceph version is Pacific - fsName = "myfs3" - fs = fsTest(fsName) - executor = fsExecutor(t, fsName, configDir, true) - clusterInfo.CephVersion = version.Pacific - context = &clusterd.Context{ - Executor: executor, - ConfigDir: configDir, - Clientset: clientset, - } - err = createFilesystem(context, clusterInfo, fs, &cephv1.ClusterSpec{}, ownerInfo, "/var/lib/rook/") - assert.NoError(t, err) + // Create another filesystem which should fail + err := createFilesystem(context, clusterInfo, fs, &cephv1.ClusterSpec{}, &k8sutil.OwnerInfo{}, "/var/lib/rook/") + assert.Error(t, err) + assert.Equal(t, fmt.Sprintf("failed to create filesystem %q: multiple filesystems are only supported as of ceph pacific", fsName), err.Error()) + }) + + t.Run("multi filesystem creation now works since ceph version is pacific", func(t *testing.T) { + clusterInfo.CephVersion = version.Pacific + err := createFilesystem(context, clusterInfo, fs, &cephv1.ClusterSpec{}, ownerInfo, "/var/lib/rook/") + assert.NoError(t, err) + }) } func TestUpgradeFilesystem(t *testing.T) { @@ -388,7 +352,9 @@ func TestUpgradeFilesystem(t *testing.T) { configDir, _ := ioutil.TempDir("", "") fsName := "myfs" - executor := fsExecutor(t, fsName, configDir, false) + addDataOnePoolCount := 0 + createDataOnePoolCount := 0 + executor := fsExecutor(t, fsName, configDir, false, &createDataOnePoolCount, &addDataOnePoolCount) defer 
os.RemoveAll(configDir) clientset := testop.New(t, 1) context := &clusterd.Context{ diff --git a/pkg/operator/ceph/file/mds/mds.go b/pkg/operator/ceph/file/mds/mds.go index 6baa9828d756..15c1f18d3f20 100644 --- a/pkg/operator/ceph/file/mds/mds.go +++ b/pkg/operator/ceph/file/mds/mds.go @@ -20,7 +20,6 @@ package mds import ( "context" "fmt" - "strconv" "strings" "syscall" "time" @@ -58,7 +57,6 @@ type Cluster struct { context *clusterd.Context clusterSpec *cephv1.ClusterSpec fs cephv1.CephFilesystem - fsID string ownerInfo *k8sutil.OwnerInfo dataDirHostPath string } @@ -75,7 +73,6 @@ func NewCluster( context *clusterd.Context, clusterSpec *cephv1.ClusterSpec, fs cephv1.CephFilesystem, - fsdetails *cephclient.CephFilesystemDetails, ownerInfo *k8sutil.OwnerInfo, dataDirHostPath string, ) *Cluster { @@ -84,7 +81,6 @@ func NewCluster( context: context, clusterSpec: clusterSpec, fs: fs, - fsID: strconv.Itoa(fsdetails.ID), ownerInfo: ownerInfo, dataDirHostPath: dataDirHostPath, } @@ -233,7 +229,7 @@ func (c *Cluster) isCephUpgrade() (bool, error) { return false, err } if cephver.IsSuperior(c.clusterInfo.CephVersion, *currentVersion) { - logger.Debugf("ceph version for MDS %q is %q and target version is %q", key, currentVersion, c.clusterInfo.CephVersion) + logger.Debugf("ceph version for MDS %q is %q and target version is %q", key, currentVersion.String(), c.clusterInfo.CephVersion.String()) return true, err } } @@ -250,7 +246,8 @@ func (c *Cluster) upgradeMDS() error { return errors.Wrap(err, "failed to setting allow_standby_replay to false") } - // In Pacific, standby-replay daemons are stopped automatically. Older versions of Ceph require us to stop these daemons manually. + // In Pacific, standby-replay daemons are stopped automatically. Older versions of Ceph require + // us to stop these daemons manually. 
if err := cephclient.FailAllStandbyReplayMDS(c.context, c.clusterInfo, c.fs.Name); err != nil { return errors.Wrap(err, "failed to fail mds agent in up:standby-replay state") } diff --git a/pkg/operator/ceph/file/mds/spec_test.go b/pkg/operator/ceph/file/mds/spec_test.go index d6c9d53e4ae1..864145a92bce 100644 --- a/pkg/operator/ceph/file/mds/spec_test.go +++ b/pkg/operator/ceph/file/mds/spec_test.go @@ -72,7 +72,6 @@ func testDeploymentObject(t *testing.T, network cephv1.NetworkSpec) (*apps.Deplo Network: network, }, fs, - &cephclient.CephFilesystemDetails{ID: 15}, &k8sutil.OwnerInfo{}, "/var/lib/rook/", ) diff --git a/pkg/operator/ceph/file/mirror/config.go b/pkg/operator/ceph/file/mirror/config.go index 4314c51a89e6..2edc5bdf8193 100644 --- a/pkg/operator/ceph/file/mirror/config.go +++ b/pkg/operator/ceph/file/mirror/config.go @@ -41,8 +41,7 @@ const ( var ( // PeerAdditionMinVersion This version includes a number of fixes for snapshots and mirror status - // TODO change me to 16.2.5 - PeerAdditionMinVersion = version.CephVersion{Major: 16, Minor: 2, Extra: 2} + PeerAdditionMinVersion = version.CephVersion{Major: 16, Minor: 2, Extra: 5} ) // daemonConfig for a single rbd-mirror diff --git a/pkg/operator/ceph/file/mirror/controller.go b/pkg/operator/ceph/file/mirror/controller.go index 0f474959cf12..c20f4a8e7390 100644 --- a/pkg/operator/ceph/file/mirror/controller.go +++ b/pkg/operator/ceph/file/mirror/controller.go @@ -177,7 +177,7 @@ func (r *ReconcileFilesystemMirror) reconcile(request reconcile.Request) (reconc } // Make sure a CephCluster is present otherwise do nothing - cephCluster, isReadyToReconcile, _, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName) + cephCluster, isReadyToReconcile, _, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, request.NamespacedName, controllerName) if !isReadyToReconcile { logger.Debugf("CephCluster resource not ready in namespace %q, retrying in %q.", request.NamespacedName.Namespace, reconcileResponse.RequeueAfter.String()) return reconcileResponse, nil diff --git a/pkg/operator/ceph/nfs/controller.go b/pkg/operator/ceph/nfs/controller.go index 32f5cd913834..eb7335368f70 100644 --- a/pkg/operator/ceph/nfs/controller.go +++ b/pkg/operator/ceph/nfs/controller.go @@ -31,6 +31,7 @@ import ( opconfig "github.com/rook/rook/pkg/operator/ceph/config" opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" "github.com/rook/rook/pkg/operator/ceph/reporting" + "github.com/rook/rook/pkg/operator/ceph/version" "github.com/rook/rook/pkg/operator/k8sutil" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" @@ -50,6 +51,9 @@ const ( controllerName = "ceph-nfs-controller" ) +// Version of Ceph where NFS default pool name changes to ".nfs" +var cephNFSChangeVersion = version.CephVersion{Major: 16, Minor: 2, Extra: 7} + var logger = capnslog.NewPackageLogger("github.com/rook/rook", controllerName) // List of object resources to watch by the controller @@ -164,7 +168,7 @@ func (r *ReconcileCephNFS) reconcile(request reconcile.Request) (reconcile.Resul } // Make sure a CephCluster is present otherwise do nothing - cephCluster, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName) + cephCluster, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, request.NamespacedName, controllerName) if !isReadyToReconcile { // This handles the case 
where the Ceph Cluster is gone and we want to delete that CR // We skip the deleteStore() function since everything is gone already @@ -222,11 +226,39 @@ func (r *ReconcileCephNFS) reconcile(request reconcile.Request) (reconcile.Resul return reconcile.Result{}, nil } + + // Octopus: Customization is allowed, so don't change the pool and namespace + // Pacific before 16.2.7: No customization, default pool name is nfs-ganesha + // Pacific 16.2.7 and later: No customization, default pool name is .nfs + // This code changes the pool and namespace to the correct values if the version is Pacific. + // If the version precedes Pacific, it doesn't change anything and the values used are what the user provided in the spec. + if r.clusterInfo.CephVersion.IsAtLeastPacific() { + if r.clusterInfo.CephVersion.IsAtLeast(cephNFSChangeVersion) { + cephNFS.Spec.RADOS.Pool = postNFSChangeDefaultPoolName + } else { + cephNFS.Spec.RADOS.Pool = preNFSChangeDefaultPoolName + } + cephNFS.Spec.RADOS.Namespace = cephNFS.Name + } else { + // This handles the case where the user has not provided a pool name and the cluster version + // is Octopus. We need to do this since the pool name is optional in the API due to the + // changes in Pacific defaulting to the ".nfs" pool. + // We default to the new name so that nothing will break on upgrades + if cephNFS.Spec.RADOS.Pool == "" { + cephNFS.Spec.RADOS.Pool = postNFSChangeDefaultPoolName + } + } + // validate the store settings if err := validateGanesha(r.context, r.clusterInfo, cephNFS); err != nil { return reconcile.Result{}, errors.Wrapf(err, "invalid ceph nfs %q arguments", cephNFS.Name) } + // Always create the default pool + err = r.createDefaultNFSRADOSPool(cephNFS) + if err != nil { + return reconcile.Result{}, errors.Wrapf(err, "failed to create default pool %q", cephNFS.Spec.RADOS.Pool) + } + // CREATE/UPDATE logger.Debug("reconciling ceph nfs deployments") _, err = r.reconcileCreateCephNFS(cephNFS) diff --git a/pkg/operator/ceph/nfs/controller_test.go b/pkg/operator/ceph/nfs/controller_test.go index c013e26d5d47..55499f4f65fd 100644 --- a/pkg/operator/ceph/nfs/controller_test.go +++ b/pkg/operator/ceph/nfs/controller_test.go @@ -19,11 +19,11 @@ package nfs import ( "context" - "errors" "os" "testing" "github.com/coreos/pkg/capnslog" + "github.com/pkg/errors" cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" rookclient "github.com/rook/rook/pkg/client/clientset/versioned/fake" "github.com/rook/rook/pkg/client/clientset/versioned/scheme" @@ -51,25 +51,6 @@ var ( "ceph version 14.2.8 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) nautilus (stable)": 3 } }` - poolDetails = `{ - "pool": "foo", - "pool_id": 1, - "size": 3, - "min_size": 2, - "pg_num": 8, - "pgp_num": 8, - "crush_rule": "replicated_rule", - "hashpspool": true, - "nodelete": false, - "nopgchange": false, - "nosizechange": false, - "write_fadvise_dontneed": false, - "noscrub": false, - "nodeep-scrub": false, - "use_gmt_hitset": true, - "fast_read": 0, - "pg_autoscale_mode": "on" - }` ) func TestCephNFSController(t *testing.T) { @@ -217,10 +198,16 @@ func TestCephNFSController(t *testing.T) { if args[0] == "versions" { return dummyVersionsRaw, nil } - if args[0] == "osd" && args[1] == "pool" && args[2] == "get" { - return poolDetails, nil + if args[0] == "osd" && args[1] == "pool" && args[2] == "create" { + return "", nil } - return "", errors.New("unknown command") + if args[0] == "osd" && args[1] == "crush" && args[2] == "rule" { + return "", nil + } + if args[0] == "osd" && args[1] == "pool" && args[2]
== "application" { + return "", nil + } + return "", errors.Errorf("unknown command %q %v", command, args) }, MockExecuteCommand: func(command string, args ...string) error { if command == "rados" { diff --git a/pkg/operator/ceph/nfs/nfs.go b/pkg/operator/ceph/nfs/nfs.go index 11bfc8f2012e..197eb70c8703 100644 --- a/pkg/operator/ceph/nfs/nfs.go +++ b/pkg/operator/ceph/nfs/nfs.go @@ -37,6 +37,10 @@ import ( const ( ganeshaRadosGraceCmd = "ganesha-rados-grace" + // Default RADOS pool name after the NFS changes in Ceph + postNFSChangeDefaultPoolName = ".nfs" + // Default RADOS pool name before the NFS changes in Ceph + preNFSChangeDefaultPoolName = "nfs-ganesha" ) var updateDeploymentAndWait = opmon.UpdateCephDeploymentAndWait @@ -268,15 +272,29 @@ func validateGanesha(context *clusterd.Context, clusterInfo *cephclient.ClusterI return errors.New("missing RADOS.pool") } + if n.Spec.RADOS.Namespace == "" { + return errors.New("missing RADOS.namespace") + } + // Ganesha server properties if n.Spec.Server.Active == 0 { return errors.New("at least one active server required") } - // The existence of the pool provided in n.Spec.RADOS.Pool is necessary otherwise addRADOSConfigFile() will fail - _, err := cephclient.GetPoolDetails(context, clusterInfo, n.Spec.RADOS.Pool) + return nil +} + +// create and enable default RADOS pool +func (r *ReconcileCephNFS) createDefaultNFSRADOSPool(n *cephv1.CephNFS) error { + poolName := n.Spec.RADOS.Pool + // Settings are not always declared and CreateReplicatedPoolForApp does not accept a pointer for + // the pool spec + if n.Spec.RADOS.PoolConfig == nil { + n.Spec.RADOS.PoolConfig = &cephv1.PoolSpec{} + } + err := cephclient.CreateReplicatedPoolForApp(r.context, r.clusterInfo, r.cephClusterSpec, poolName, *n.Spec.RADOS.PoolConfig, cephclient.DefaultPGCount, "nfs") if err != nil { - return errors.Wrapf(err, "pool %q not found", n.Spec.RADOS.Pool) + return err } return nil diff --git a/pkg/operator/ceph/object/admin.go b/pkg/operator/ceph/object/admin.go index 6b78cb80381c..54f7c3cac471 100644 --- a/pkg/operator/ceph/object/admin.go +++ b/pkg/operator/ceph/object/admin.go @@ -18,11 +18,16 @@ package object import ( "context" + "encoding/json" "fmt" + "net/http" + "net/http/httputil" "regexp" + "strings" "github.com/ceph/go-ceph/rgw/admin" "github.com/coreos/pkg/capnslog" + "github.com/google/go-cmp/cmp" "github.com/pkg/errors" cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" "github.com/rook/rook/pkg/clusterd" @@ -55,6 +60,40 @@ type AdminOpsContext struct { AdminOpsClient *admin.API } +type debugHTTPClient struct { + client admin.HTTPClient + logger *capnslog.PackageLogger +} + +// NewDebugHTTPClient helps us mutating the HTTP client to debug the request/response +func NewDebugHTTPClient(client admin.HTTPClient, logger *capnslog.PackageLogger) *debugHTTPClient { + return &debugHTTPClient{client, logger} +} + +func (c *debugHTTPClient) Do(req *http.Request) (*http.Response, error) { + dump, err := httputil.DumpRequestOut(req, true) + if err != nil { + return nil, err + } + // this can leak credentials for making requests + c.logger.Tracef("\n%s\n", string(dump)) + + resp, err := c.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + dump, err = httputil.DumpResponse(resp, true) + if err != nil { + return nil, err + } + // this can leak any sensitive info like credentials in the response + c.logger.Tracef("\n%s\n", string(dump)) + + return resp, nil +} + const ( // RGWAdminOpsUserSecretName is the secret name of the admin ops 
user // #nosec G101 since this is not leaking any hardcoded credentials, it's just the secret name @@ -116,17 +155,25 @@ func NewMultisiteAdminOpsContext( return nil, errors.Wrapf(err, "failed to create or retrieve rgw admin ops user") } - httpClient, tlsCert, err := GenObjectStoreHTTPClient(objContext, spec) + httpClient, tlsCert, err := genObjectStoreHTTPClientFunc(objContext, spec) if err != nil { return nil, err } - client, err := admin.New(objContext.Endpoint, accessKey, secretKey, httpClient) - if err != nil { - return nil, errors.Wrap(err, "failed to build admin ops API connection") - } + + // If DEBUG level is set we will mutate the HTTP client for printing request and response + var client *admin.API if logger.LevelAt(capnslog.DEBUG) { - client.Debug = true + client, err = admin.New(objContext.Endpoint, accessKey, secretKey, NewDebugHTTPClient(httpClient, logger)) + if err != nil { + return nil, errors.Wrap(err, "failed to build admin ops API connection") + } + } else { + client, err = admin.New(objContext.Endpoint, accessKey, secretKey, httpClient) + if err != nil { + return nil, errors.Wrap(err, "failed to build admin ops API connection") + } } + return &AdminOpsContext{ Context: *objContext, TlsCert: tlsCert, @@ -172,7 +219,7 @@ func RunAdminCommandNoMultisite(c *Context, expectJSON bool, args ...string) (st output, stderr, err = c.Context.RemoteExecutor.ExecCommandInContainerWithFullOutputWithTimeout(cephclient.ProxyAppLabel, cephclient.CommandProxyInitContainerName, c.clusterInfo.Namespace, append([]string{"radosgw-admin"}, args...)...) } else { command, args := cephclient.FinalizeCephCommandArgs("radosgw-admin", c.clusterInfo, args, c.Context.ConfigDir) - output, err = c.Context.Executor.ExecuteCommandWithTimeout(exec.CephCommandTimeout, command, args...) + output, err = c.Context.Executor.ExecuteCommandWithTimeout(exec.CephCommandsTimeout, command, args...) } if err != nil { @@ -250,6 +297,121 @@ func isInvalidFlagError(err error) bool { return exitCode == 22 } +// CommitConfigChanges commits changes to RGW configs for realm/zonegroup/zone changes idempotently. +// Under the hood, this updates the RGW config period and commits the change if changes are detected. +func CommitConfigChanges(c *Context) error { + currentPeriod, err := runAdminCommand(c, true, "period", "get") + if err != nil { + return errorOrIsNotFound(err, "failed to get the current RGW configuration period to see if it needs changed") + } + + // this stages the current config changes and returns what the new period config will look like + // without committing the changes + stagedPeriod, err := runAdminCommand(c, true, "period", "update") + if err != nil { + return errorOrIsNotFound(err, "failed to stage the current RGW configuration period") + } + + shouldCommit, err := periodWillChange(currentPeriod, stagedPeriod) + if err != nil { + return errors.Wrap(err, "failed to determine if the staged RGW configuration period is different from current") + } + + // DO NOT MODIFY nsName here. It is part of the integration test checks noted below. + nsName := fmt.Sprintf("%s/%s", c.clusterInfo.Namespace, c.Name) + if !shouldCommit { + // DO NOT MODIFY THE MESSAGE BELOW. It is checked in integration tests. + logger.Infof("there are no changes to commit for RGW configuration period for CephObjectStore %q", nsName) + return nil + } + // DO NOT MODIFY THE MESSAGE BELOW. It is checked in integration tests.
+ logger.Infof("committing changes to RGW configuration period for CephObjectStore %q", nsName) + // don't expect json output since we don't intend to use the output from the command + _, err = runAdminCommand(c, false, "period", "update", "--commit") + if err != nil { + return errorOrIsNotFound(err, "failed to commit RGW configuration period changes") + } + + return nil +} + +// return true if the configuration period will change if the staged period is committed +func periodWillChange(current, staged string) (bool, error) { + // Rook wants to check if there are any differences in the current period versus the period that + // is staged to be applied/committed. If there are differences, then Rook should "commit" the + // staged period changes to instruct RGWs to update their runtime configuration. + // + // For many RGW interactions, Rook often creates a typed struct to unmarshal RGW JSON output + // into. In those cases Rook is able to opt in to only a small subset of specific fields it + // needs. This keeps the coupling between Rook and RGW JSON output as loose as possible while + // still being specific enough for Rook to operate. + // + // For this implementation, we could use a strongly-typed struct here to unmarshal into, and we + // could use DisallowUnknownFields() to cause an error if the RGW JSON output changes to flag + // when the existing implementation might be invalidated. This relies on an extremely tight + // coupling between Rook and the JSON output from RGW. The failure mode of this implementation + // is to return an error from the reconcile when there are unmarshalling errors, which results + // in CephObjectStores that could not be updated if a version of Ceph changes the RGW output. + // + // In the chosen implementation, we unmarshal into "dumb" data structures that create a loose + // coupling. With these, we must ignore the fields that we have observed to change between the + // current and staged periods when we should *not* commit an un-changed period. The failure mode + // of this implementation is that if the RGW output changes its structure, Rook may detect + // differences where there are none. This would result in Rook committing the period more often + // than necessary. Committing the period results in a short period of downtime while RGWs reload + // their configuration, but we opt for this inconvenience in lieu of blocking reconciliation. + // + // For any implementation, if the RGW changes the behavior of its output but not the structure, + // Rook could commit unnecessary period changes or fail to commit necessary period changes + // depending on how the RGW output has changed. Rook cannot detect this class of failures, and + // the behavior cannot be specifically known. + var currentJSON map[string]interface{} + var stagedJSON map[string]interface{} + var err error + + err = json.Unmarshal([]byte(current), ¤tJSON) + if err != nil { + return true, errors.Wrap(err, "failed to unmarshal current RGW configuration period") + } + err = json.Unmarshal([]byte(staged), &stagedJSON) + if err != nil { + return true, errors.Wrap(err, "failed to unmarshal staged RGW configuration period") + } + + // There are some values in the periods that we don't care to diff because they are always + // different in the staged period, even when no updates are needed. Sometimes, the values are + // reported as different in the staging output but aren't actually changed upon commit. 
+ ignorePaths := cmp.FilterPath(func(path cmp.Path) bool { + // path.String() outputs nothing for the crude map[string]interface{} JSON struct + // Example of path.GoString() output for a long path in the period JSON: + // root["period_map"].(map[string]interface {})["short_zone_ids"].([]interface {})[0].(map[string]interface {})["val"].(float64) + switch path.GoString() { + case `root["id"]`: + // "id" is always changed in the staged period, but it doesn't always update. + return true + case `root["predecessor_uuid"]`: + // "predecessor_uuid" is always changed in the staged period, but it doesn't always update. + return true + case `root["realm_epoch"]`: + // "realm_epoch" is always incremented in the staged period, but it doesn't always increment. + return true + case `root["epoch"]`: + // Strangely, "epoch" is not incremented in the staged period even though it is always + // incremented upon an actual commit. It could be argued that this behavior is a bug. + // Ignore this value to handle the possibility that the behavior changes in the future. + return true + default: + return false + } + }, cmp.Ignore()) + + diff := cmp.Diff(currentJSON, stagedJSON, ignorePaths) + diff = strings.TrimSpace(diff) + logger.Debugf("RGW config period diff:\n%s", diff) + + return (diff != ""), nil +} + func GetAdminOPSUserCredentials(objContext *Context, spec *cephv1.ObjectStoreSpec) (string, string, error) { ns := objContext.clusterInfo.Namespace diff --git a/pkg/operator/ceph/object/admin_test.go b/pkg/operator/ceph/object/admin_test.go index 5e05060b2342..30b84e23ee51 100644 --- a/pkg/operator/ceph/object/admin_test.go +++ b/pkg/operator/ceph/object/admin_test.go @@ -21,6 +21,7 @@ import ( "testing" "time" + "github.com/pkg/errors" v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" "github.com/rook/rook/pkg/clusterd" "github.com/rook/rook/pkg/daemon/ceph/client" @@ -158,3 +159,574 @@ func TestRunAdminCommandNoMultisite(t *testing.T) { assert.EqualError(t, err, "no pods found with selector \"rook-ceph-mgr\"") }) } + +func TestCommitConfigChanges(t *testing.T) { + // control the return values from calling get/update on period + type commandReturns struct { + periodGetOutput string // empty implies error + periodUpdateOutput string // empty implies error + periodCommitError bool + } + + // control whether we should expect certain 'get' calls + type expectCommands struct { + // note: always expect period get to be called + periodUpdate bool + periodCommit bool + } + + // vars used to check if commands were called + var ( + periodGetCalled = false + periodUpdateCalled = false + periodCommitCalled = false + ) + + setupTest := func(returns commandReturns) *clusterd.Context { + // reset vars for checking if commands were called + periodGetCalled = false + periodUpdateCalled = false + periodCommitCalled = false + + executor := &exectest.MockExecutor{ + MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, args ...string) (string, error) { + if command == "radosgw-admin" { + if args[0] == "period" { + if args[1] == "get" { + periodGetCalled = true + if returns.periodGetOutput == "" { + return "", errors.New("fake period get error") + } + return returns.periodGetOutput, nil + } + if args[1] == "update" { + if args[2] == "--commit" { + periodCommitCalled = true + if returns.periodCommitError { + return "", errors.New("fake period update --commit error") + } + return "", nil // success + } + periodUpdateCalled = true + if returns.periodUpdateOutput == "" { + return "", errors.New("fake period 
update (no --commit) error") + } + return returns.periodUpdateOutput, nil + } + } + } + + t.Fatalf("unhandled command: %s %v", command, args) + panic("unhandled command") + }, + } + + return &clusterd.Context{ + Executor: executor, + } + } + + expectNoErr := false // want no error + expectErr := true // want an error + + tests := []struct { + name string + commandReturns commandReturns + expectCommands expectCommands + wantErr bool + }{ + // a bit more background: creating a realm creates the first period epoch. When Rook creates + // zonegroup and zone, it results in many changes to the period. + {"real-world first reconcile (many changes, should commit period)", + commandReturns{ + periodGetOutput: firstPeriodGet, + periodUpdateOutput: firstPeriodUpdate, + }, + expectCommands{ + periodUpdate: true, + periodCommit: true, + }, + expectNoErr, + }, + // note: this also tests that we support the output changing in the future to increment "epoch" + {"real-world second reconcile (no changes, should not commit period)", + commandReturns{ + periodGetOutput: secondPeriodGet, + periodUpdateOutput: secondPeriodUpdateWithoutChanges, + }, + expectCommands{ + periodUpdate: true, + periodCommit: false, + }, + expectNoErr, + }, + {"second reconcile with changes", + commandReturns{ + periodGetOutput: secondPeriodGet, + periodUpdateOutput: secondPeriodUpdateWithChanges, + }, + expectCommands{ + periodUpdate: true, + periodCommit: true, + }, + expectNoErr, + }, + {"invalid get json", + commandReturns{ + periodGetOutput: `{"ids": [}`, // json obj with incomplete array that won't parse + periodUpdateOutput: firstPeriodUpdate, + }, + expectCommands{ + periodUpdate: true, + periodCommit: false, + }, + expectErr, + }, + {"invalid update json", + commandReturns{ + periodGetOutput: firstPeriodGet, + periodUpdateOutput: `{"ids": [}`, + }, + expectCommands{ + periodUpdate: true, + periodCommit: false, + }, + expectErr, + }, + {"fail period get", + commandReturns{ + periodGetOutput: "", // error + periodUpdateOutput: firstPeriodUpdate, + }, + expectCommands{ + periodUpdate: false, + periodCommit: false, + }, + expectErr, + }, + {"fail period update", + commandReturns{ + periodGetOutput: firstPeriodGet, + periodUpdateOutput: "", // error + }, + expectCommands{ + periodUpdate: true, + periodCommit: false, + }, + expectErr, + }, + {"fail period commit", + commandReturns{ + periodGetOutput: firstPeriodGet, + periodUpdateOutput: firstPeriodUpdate, + periodCommitError: true, + }, + expectCommands{ + periodUpdate: true, + periodCommit: true, + }, + expectErr, + }, + {"configs are removed", + commandReturns{ + periodGetOutput: secondPeriodUpdateWithChanges, + periodUpdateOutput: secondPeriodUpdateWithoutChanges, + }, + expectCommands{ + periodUpdate: true, + periodCommit: true, + }, + expectNoErr, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := setupTest(tt.commandReturns) + objCtx := NewContext(ctx, &client.ClusterInfo{Namespace: "my-cluster"}, "my-store") + + err := CommitConfigChanges(objCtx) + if tt.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + assert.True(t, periodGetCalled) + assert.Equal(t, tt.expectCommands.periodUpdate, periodUpdateCalled) + assert.Equal(t, tt.expectCommands.periodCommit, periodCommitCalled) + }) + } +} + +// example real-world output from 'radosgw-admin period get' after initial realm, zonegroup, and +// zone creation and before 'radosgw-admin period update --commit' +const firstPeriodGet = `{ + "id": 
"5338e008-26db-4013-92f5-c51505a917e2", + "epoch": 1, + "predecessor_uuid": "", + "sync_status": [], + "period_map": { + "id": "5338e008-26db-4013-92f5-c51505a917e2", + "zonegroups": [], + "short_zone_ids": [] + }, + "master_zonegroup": "", + "master_zone": "", + "period_config": { + "bucket_quota": { + "enabled": false, + "check_on_raw": false, + "max_size": -1, + "max_size_kb": 0, + "max_objects": -1 + }, + "user_quota": { + "enabled": false, + "check_on_raw": false, + "max_size": -1, + "max_size_kb": 0, + "max_objects": -1 + } + }, + "realm_id": "94ba560d-a560-431d-8ed4-85a2891f9122", + "realm_name": "my-store", + "realm_epoch": 1 +}` + +// example real-world output from 'radosgw-admin period update' after initial realm, zonegroup, and +// zone creation and before 'radosgw-admin period update --commit' +const firstPeriodUpdate = `{ + "id": "94ba560d-a560-431d-8ed4-85a2891f9122:staging", + "epoch": 1, + "predecessor_uuid": "5338e008-26db-4013-92f5-c51505a917e2", + "sync_status": [], + "period_map": { + "id": "5338e008-26db-4013-92f5-c51505a917e2", + "zonegroups": [ + { + "id": "1580fd1d-a065-4484-82ff-329e9a779999", + "name": "my-store", + "api_name": "my-store", + "is_master": "true", + "endpoints": [ + "http://10.105.59.166:80" + ], + "hostnames": [], + "hostnames_s3website": [], + "master_zone": "cea71d3a-9d22-45fb-a4e8-04fc6a494a50", + "zones": [ + { + "id": "cea71d3a-9d22-45fb-a4e8-04fc6a494a50", + "name": "my-store", + "endpoints": [ + "http://10.105.59.166:80" + ], + "log_meta": "false", + "log_data": "false", + "bucket_index_max_shards": 11, + "read_only": "false", + "tier_type": "", + "sync_from_all": "true", + "sync_from": [], + "redirect_zone": "" + } + ], + "placement_targets": [ + { + "name": "default-placement", + "tags": [], + "storage_classes": [ + "STANDARD" + ] + } + ], + "default_placement": "default-placement", + "realm_id": "94ba560d-a560-431d-8ed4-85a2891f9122", + "sync_policy": { + "groups": [] + } + } + ], + "short_zone_ids": [ + { + "key": "cea71d3a-9d22-45fb-a4e8-04fc6a494a50", + "val": 1698422904 + } + ] + }, + "master_zonegroup": "1580fd1d-a065-4484-82ff-329e9a779999", + "master_zone": "cea71d3a-9d22-45fb-a4e8-04fc6a494a50", + "period_config": { + "bucket_quota": { + "enabled": false, + "check_on_raw": false, + "max_size": -1, + "max_size_kb": 0, + "max_objects": -1 + }, + "user_quota": { + "enabled": false, + "check_on_raw": false, + "max_size": -1, + "max_size_kb": 0, + "max_objects": -1 + } + }, + "realm_id": "94ba560d-a560-431d-8ed4-85a2891f9122", + "realm_name": "my-store", + "realm_epoch": 2 +}` + +// example real-world output from 'radosgw-admin period get' after the first period commit +const secondPeriodGet = `{ + "id": "600c23a6-2452-4fc0-96b4-0c78b9b7c439", + "epoch": 1, + "predecessor_uuid": "5338e008-26db-4013-92f5-c51505a917e2", + "sync_status": [], + "period_map": { + "id": "600c23a6-2452-4fc0-96b4-0c78b9b7c439", + "zonegroups": [ + { + "id": "1580fd1d-a065-4484-82ff-329e9a779999", + "name": "my-store", + "api_name": "my-store", + "is_master": "true", + "endpoints": [ + "http://10.105.59.166:80" + ], + "hostnames": [], + "hostnames_s3website": [], + "master_zone": "cea71d3a-9d22-45fb-a4e8-04fc6a494a50", + "zones": [ + { + "id": "cea71d3a-9d22-45fb-a4e8-04fc6a494a50", + "name": "my-store", + "endpoints": [ + "http://10.105.59.166:80" + ], + "log_meta": "false", + "log_data": "false", + "bucket_index_max_shards": 11, + "read_only": "false", + "tier_type": "", + "sync_from_all": "true", + "sync_from": [], + "redirect_zone": "" + } + ], + 
"placement_targets": [ + { + "name": "default-placement", + "tags": [], + "storage_classes": [ + "STANDARD" + ] + } + ], + "default_placement": "default-placement", + "realm_id": "94ba560d-a560-431d-8ed4-85a2891f9122", + "sync_policy": { + "groups": [] + } + } + ], + "short_zone_ids": [ + { + "key": "cea71d3a-9d22-45fb-a4e8-04fc6a494a50", + "val": 1698422904 + } + ] + }, + "master_zonegroup": "1580fd1d-a065-4484-82ff-329e9a779999", + "master_zone": "cea71d3a-9d22-45fb-a4e8-04fc6a494a50", + "period_config": { + "bucket_quota": { + "enabled": false, + "check_on_raw": false, + "max_size": -1, + "max_size_kb": 0, + "max_objects": -1 + }, + "user_quota": { + "enabled": false, + "check_on_raw": false, + "max_size": -1, + "max_size_kb": 0, + "max_objects": -1 + } + }, + "realm_id": "94ba560d-a560-431d-8ed4-85a2891f9122", + "realm_name": "my-store", + "realm_epoch": 2 +}` + +// example real-world output from 'radosgw-admin period update' after the first period commit, +// and with no changes since the first commit +// note: output was modified to increment the epoch to make sure this code works in case the "epoch" +// behavior changes in radosgw-admin in the future +const secondPeriodUpdateWithoutChanges = `{ + "id": "94ba560d-a560-431d-8ed4-85a2891f9122:staging", + "epoch": 2, + "predecessor_uuid": "600c23a6-2452-4fc0-96b4-0c78b9b7c439", + "sync_status": [], + "period_map": { + "id": "600c23a6-2452-4fc0-96b4-0c78b9b7c439", + "zonegroups": [ + { + "id": "1580fd1d-a065-4484-82ff-329e9a779999", + "name": "my-store", + "api_name": "my-store", + "is_master": "true", + "endpoints": [ + "http://10.105.59.166:80" + ], + "hostnames": [], + "hostnames_s3website": [], + "master_zone": "cea71d3a-9d22-45fb-a4e8-04fc6a494a50", + "zones": [ + { + "id": "cea71d3a-9d22-45fb-a4e8-04fc6a494a50", + "name": "my-store", + "endpoints": [ + "http://10.105.59.166:80" + ], + "log_meta": "false", + "log_data": "false", + "bucket_index_max_shards": 11, + "read_only": "false", + "tier_type": "", + "sync_from_all": "true", + "sync_from": [], + "redirect_zone": "" + } + ], + "placement_targets": [ + { + "name": "default-placement", + "tags": [], + "storage_classes": [ + "STANDARD" + ] + } + ], + "default_placement": "default-placement", + "realm_id": "94ba560d-a560-431d-8ed4-85a2891f9122", + "sync_policy": { + "groups": [] + } + } + ], + "short_zone_ids": [ + { + "key": "cea71d3a-9d22-45fb-a4e8-04fc6a494a50", + "val": 1698422904 + } + ] + }, + "master_zonegroup": "1580fd1d-a065-4484-82ff-329e9a779999", + "master_zone": "cea71d3a-9d22-45fb-a4e8-04fc6a494a50", + "period_config": { + "bucket_quota": { + "enabled": false, + "check_on_raw": false, + "max_size": -1, + "max_size_kb": 0, + "max_objects": -1 + }, + "user_quota": { + "enabled": false, + "check_on_raw": false, + "max_size": -1, + "max_size_kb": 0, + "max_objects": -1 + } + }, + "realm_id": "94ba560d-a560-431d-8ed4-85a2891f9122", + "realm_name": "my-store", + "realm_epoch": 3 +}` + +// example output from 'radosgw-admin period update' after the first period commit, +// and with un-committed changes since the first commit (endpoint added to zonegroup and zone) +const secondPeriodUpdateWithChanges = `{ + "id": "94ba560d-a560-431d-8ed4-85a2891f9122:staging", + "epoch": 1, + "predecessor_uuid": "600c23a6-2452-4fc0-96b4-0c78b9b7c439", + "sync_status": [], + "period_map": { + "id": "600c23a6-2452-4fc0-96b4-0c78b9b7c439", + "zonegroups": [ + { + "id": "1580fd1d-a065-4484-82ff-329e9a779999", + "name": "my-store", + "api_name": "my-store", + "is_master": "true", + "endpoints": [ + 
"http://10.105.59.166:80", + "https://10.105.59.166:443" + ], + "hostnames": [], + "hostnames_s3website": [], + "master_zone": "cea71d3a-9d22-45fb-a4e8-04fc6a494a50", + "zones": [ + { + "id": "cea71d3a-9d22-45fb-a4e8-04fc6a494a50", + "name": "my-store", + "endpoints": [ + "http://10.105.59.166:80", + "https://10.105.59.166:443" + ], + "log_meta": "false", + "log_data": "false", + "bucket_index_max_shards": 11, + "read_only": "false", + "tier_type": "", + "sync_from_all": "true", + "sync_from": [], + "redirect_zone": "" + } + ], + "placement_targets": [ + { + "name": "default-placement", + "tags": [], + "storage_classes": [ + "STANDARD" + ] + } + ], + "default_placement": "default-placement", + "realm_id": "94ba560d-a560-431d-8ed4-85a2891f9122", + "sync_policy": { + "groups": [] + } + } + ], + "short_zone_ids": [ + { + "key": "cea71d3a-9d22-45fb-a4e8-04fc6a494a50", + "val": 1698422904 + } + ] + }, + "master_zonegroup": "1580fd1d-a065-4484-82ff-329e9a779999", + "master_zone": "cea71d3a-9d22-45fb-a4e8-04fc6a494a50", + "period_config": { + "bucket_quota": { + "enabled": false, + "check_on_raw": false, + "max_size": -1, + "max_size_kb": 0, + "max_objects": -1 + }, + "user_quota": { + "enabled": false, + "check_on_raw": false, + "max_size": -1, + "max_size_kb": 0, + "max_objects": -1 + } + }, + "realm_id": "94ba560d-a560-431d-8ed4-85a2891f9122", + "realm_name": "my-store", + "realm_epoch": 3 +}` diff --git a/pkg/operator/ceph/object/bucket/provisioner.go b/pkg/operator/ceph/object/bucket/provisioner.go index 1947e389f352..4d989be5b378 100644 --- a/pkg/operator/ceph/object/bucket/provisioner.go +++ b/pkg/operator/ceph/object/bucket/provisioner.go @@ -29,6 +29,7 @@ import ( bktv1alpha1 "github.com/kube-object-storage/lib-bucket-provisioner/pkg/apis/objectbucket.io/v1alpha1" apibkt "github.com/kube-object-storage/lib-bucket-provisioner/pkg/provisioner/api" opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" + "github.com/rook/rook/pkg/operator/ceph/object" storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -55,6 +56,7 @@ type Provisioner struct { endpoint string additionalConfigData map[string]string tlsCert []byte + insecureTLS bool adminOpsClient *admin.API } @@ -81,7 +83,7 @@ func (p Provisioner) Provision(options *apibkt.BucketOptions) (*bktv1alpha1.Obje return nil, errors.Wrap(err, "Provision: can't create ceph user") } - s3svc, err := cephObject.NewS3Agent(p.accessKeyID, p.secretAccessKey, p.getObjectStoreEndpoint(), p.adminOpsClient.Debug, p.tlsCert) + s3svc, err := cephObject.NewS3Agent(p.accessKeyID, p.secretAccessKey, p.getObjectStoreEndpoint(), p.region, logger.LevelAt(capnslog.DEBUG), p.tlsCert) if err != nil { p.deleteOBCResourceLogError("") return nil, err @@ -158,7 +160,7 @@ func (p Provisioner) Grant(options *apibkt.BucketOptions) (*bktv1alpha1.ObjectBu return nil, errors.Wrapf(err, "failed to get user %q", stats.Owner) } - s3svc, err := cephObject.NewS3Agent(objectUser.Keys[0].AccessKey, objectUser.Keys[0].SecretKey, p.getObjectStoreEndpoint(), p.adminOpsClient.Debug, p.tlsCert) + s3svc, err := cephObject.NewS3Agent(objectUser.Keys[0].AccessKey, objectUser.Keys[0].SecretKey, p.getObjectStoreEndpoint(), p.region, logger.LevelAt(capnslog.DEBUG), p.tlsCert) if err != nil { p.deleteOBCResourceLogError("") return nil, err @@ -254,7 +256,7 @@ func (p Provisioner) Revoke(ob *bktv1alpha1.ObjectBucket) error { return err } - s3svc, err := cephObject.NewS3Agent(user.Keys[0].AccessKey, user.Keys[0].SecretKey, p.getObjectStoreEndpoint(), 
p.adminOpsClient.Debug, p.tlsCert) + s3svc, err := cephObject.NewS3Agent(user.Keys[0].AccessKey, user.Keys[0].SecretKey, p.getObjectStoreEndpoint(), p.region, logger.LevelAt(capnslog.DEBUG), p.tlsCert) if err != nil { return err } @@ -553,8 +555,8 @@ func (p *Provisioner) deleteOBCResourceLogError(bucketname string) { // Check for additional options mentioned in OBC and set them accordingly func (p Provisioner) setAdditionalSettings(options *apibkt.BucketOptions) error { quotaEnabled := true - maxObjects := MaxObjectQuota(options) - maxSize := MaxSizeQuota(options) + maxObjects := MaxObjectQuota(options.ObjectBucketClaim.Spec.AdditionalConfig) + maxSize := MaxSizeQuota(options.ObjectBucketClaim.Spec.AdditionalConfig) if maxObjects == "" && maxSize == "" { return nil } @@ -606,7 +608,7 @@ func (p *Provisioner) setTlsCaCert() error { } p.tlsCert = make([]byte, 0) if objStore.Spec.Gateway.SecurePort == p.storePort { - p.tlsCert, err = cephObject.GetTlsCaCert(p.objectContext, &objStore.Spec) + p.tlsCert, p.insecureTLS, err = cephObject.GetTlsCaCert(p.objectContext, &objStore.Spec) if err != nil { return err } @@ -621,7 +623,7 @@ func (p *Provisioner) setAdminOpsAPIClient() error { Timeout: cephObject.HttpTimeOut, } if p.tlsCert != nil { - httpClient.Transport = cephObject.BuildTransportTLS(p.tlsCert) + httpClient.Transport = cephObject.BuildTransportTLS(p.tlsCert, p.insecureTLS) } // Fetch the ceph object store @@ -649,15 +651,84 @@ func (p *Provisioner) setAdminOpsAPIClient() error { // Build endpoint s3endpoint := cephObject.BuildDNSEndpoint(cephObject.BuildDomainName(p.objectContext.Name, cephObjectStore.Namespace), p.storePort, cephObjectStore.Spec.IsTLSEnabled()) - // Initialize object store admin ops API - adminOpsClient, err := admin.New(s3endpoint, accessKey, secretKey, httpClient) + // If DEBUG level is set we will mutate the HTTP client for printing request and response + if logger.LevelAt(capnslog.DEBUG) { + p.adminOpsClient, err = admin.New(s3endpoint, accessKey, secretKey, object.NewDebugHTTPClient(httpClient, logger)) + if err != nil { + return errors.Wrap(err, "failed to build admin ops API connection") + } + } else { + p.adminOpsClient, err = admin.New(s3endpoint, accessKey, secretKey, httpClient) + if err != nil { + return errors.Wrap(err, "failed to build admin ops API connection") + } + } + + return nil +} +func (p Provisioner) updateAdditionalSettings(ob *bktv1alpha1.ObjectBucket) error { + var maxObjectsInt64 int64 + var maxSizeInt64 int64 + var err error + var quotaEnabled bool + maxObjects := MaxObjectQuota(ob.Spec.Endpoint.AdditionalConfigData) + maxSize := MaxSizeQuota(ob.Spec.Endpoint.AdditionalConfigData) + if maxObjects != "" { + maxObjectsInt, err := strconv.Atoi(maxObjects) + if err != nil { + return errors.Wrap(err, "failed to convert maxObjects to integer") + } + maxObjectsInt64 = int64(maxObjectsInt) + } + if maxSize != "" { + maxSizeInt64, err = maxSizeToInt64(maxSize) + if err != nil { + return errors.Wrapf(err, "failed to parse maxSize quota for user %q", p.cephUserName) + } + } + objectUser, err := p.adminOpsClient.GetUser(context.TODO(), admin.User{ID: ob.Spec.Connection.AdditionalState[cephUser]}) if err != nil { - return errors.Wrap(err, "failed to build object store admin ops API connection") + return errors.Wrapf(err, "failed to fetch user %q", p.cephUserName) } - if logger.LevelAt(capnslog.DEBUG) { - adminOpsClient.Debug = true + if *objectUser.UserQuota.Enabled && + (maxObjects == "" || maxObjectsInt64 < 0) && + (maxSize == "" || maxSizeInt64 < 0) { 
+ quotaEnabled = false + err = p.adminOpsClient.SetUserQuota(context.TODO(), admin.QuotaSpec{UID: p.cephUserName, Enabled: "aEnabled}) + if err != nil { + return errors.Wrapf(err, "failed to disable quota to user %q", p.cephUserName) + } + return nil + } + + quotaEnabled = true + quotaSpec := admin.QuotaSpec{UID: p.cephUserName, Enabled: "aEnabled} + + //MaxObject is modified + if maxObjects != "" && (maxObjectsInt64 != *objectUser.UserQuota.MaxObjects) { + quotaSpec.MaxObjects = &maxObjectsInt64 + } + + //MaxSize is modified + if maxSize != "" && (maxSizeInt64 != *objectUser.UserQuota.MaxSize) { + quotaSpec.MaxSize = &maxSizeInt64 + } + err = p.adminOpsClient.SetUserQuota(context.TODO(), quotaSpec) + if err != nil { + return errors.Wrapf(err, "failed to update quota to user %q", p.cephUserName) } - p.adminOpsClient = adminOpsClient return nil } + +// Update is sent when only there is modification to AdditionalConfig field in OBC +func (p Provisioner) Update(ob *bktv1alpha1.ObjectBucket) error { + logger.Debugf("Update event for OB: %+v", ob) + + err := p.initializeDeleteOrRevoke(ob) + if err != nil { + return err + } + + return p.updateAdditionalSettings(ob) +} diff --git a/pkg/operator/ceph/object/bucket/util.go b/pkg/operator/ceph/object/bucket/util.go index 71faa16cf8ef..9fd4ed6efbe5 100644 --- a/pkg/operator/ceph/object/bucket/util.go +++ b/pkg/operator/ceph/object/bucket/util.go @@ -23,7 +23,6 @@ import ( "github.com/coreos/pkg/capnslog" bktv1alpha1 "github.com/kube-object-storage/lib-bucket-provisioner/pkg/apis/objectbucket.io/v1alpha1" "github.com/kube-object-storage/lib-bucket-provisioner/pkg/provisioner" - apibkt "github.com/kube-object-storage/lib-bucket-provisioner/pkg/provisioner/api" "github.com/pkg/errors" storagev1 "k8s.io/api/storage/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" @@ -117,10 +116,10 @@ func randomString(n int) string { return string(b) } -func MaxObjectQuota(options *apibkt.BucketOptions) string { - return options.ObjectBucketClaim.Spec.AdditionalConfig["maxObjects"] +func MaxObjectQuota(AdditionalConfig map[string]string) string { + return AdditionalConfig["maxObjects"] } -func MaxSizeQuota(options *apibkt.BucketOptions) string { - return options.ObjectBucketClaim.Spec.AdditionalConfig["maxSize"] +func MaxSizeQuota(AdditionalConfig map[string]string) string { + return AdditionalConfig["maxSize"] } diff --git a/pkg/operator/ceph/object/config.go b/pkg/operator/ceph/object/config.go index be60afe25fa3..c6e39e0f0021 100644 --- a/pkg/operator/ceph/object/config.go +++ b/pkg/operator/ceph/object/config.go @@ -37,14 +37,21 @@ caps mon = "allow rw" caps osd = "allow rwx" ` - certVolumeName = "rook-ceph-rgw-cert" - certDir = "/etc/ceph/private" - certKeyName = "cert" - certFilename = "rgw-cert.pem" - certKeyFileName = "rgw-key.pem" - rgwPortInternalPort int32 = 8080 - ServiceServingCertCAFile = "/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt" - HttpTimeOut = time.Second * 15 + caBundleVolumeName = "rook-ceph-custom-ca-bundle" + caBundleUpdatedVolumeName = "rook-ceph-ca-bundle-updated" + caBundleTrustedDir = "/etc/pki/ca-trust/" + caBundleSourceCustomDir = caBundleTrustedDir + "source/anchors/" + caBundleExtractedDir = caBundleTrustedDir + "extracted/" + caBundleKeyName = "cabundle" + caBundleFileName = "custom-ca-bundle.crt" + certVolumeName = "rook-ceph-rgw-cert" + certDir = "/etc/ceph/private" + certKeyName = "cert" + certFilename = "rgw-cert.pem" + certKeyFileName = "rgw-key.pem" + rgwPortInternalPort int32 = 8080 + ServiceServingCertCAFile = 
"/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt" + HttpTimeOut = time.Second * 15 ) var ( @@ -72,7 +79,7 @@ func (c *clusterConfig) portString() string { portString = fmt.Sprintf("ssl_port=%d ssl_certificate=%s", c.store.Spec.Gateway.SecurePort, certPath) } - secretType, _ := c.rgwTLSSecretType() + secretType, _ := c.rgwTLSSecretType(c.store.Spec.Gateway.SSLCertificateRef) if c.store.Spec.GetServiceServingCert() != "" || secretType == v1.SecretTypeTLS { privateKey := path.Join(certDir, certKeyFileName) portString = fmt.Sprintf("%s ssl_private_key=%s", portString, privateKey) @@ -101,16 +108,16 @@ func (c *clusterConfig) generateKeyring(rgwConfig *rgwConfig) (string, error) { return keyring, s.CreateOrUpdate(rgwConfig.ResourceName, keyring) } -func (c *clusterConfig) setDefaultFlagsMonConfigStore(rgwName string) error { +func (c *clusterConfig) setDefaultFlagsMonConfigStore(rgwConfig *rgwConfig) error { monStore := cephconfig.GetMonStore(c.context, c.clusterInfo) - who := generateCephXUser(rgwName) + who := generateCephXUser(rgwConfig.ResourceName) configOptions := make(map[string]string) configOptions["rgw_log_nonexistent_bucket"] = "true" configOptions["rgw_log_object_name_utc"] = "true" configOptions["rgw_enable_usage_log"] = "true" - configOptions["rgw_zone"] = c.store.Name - configOptions["rgw_zonegroup"] = c.store.Name + configOptions["rgw_zone"] = rgwConfig.Zone + configOptions["rgw_zonegroup"] = rgwConfig.ZoneGroup for flag, val := range configOptions { err := monStore.Set(who, flag, val) diff --git a/pkg/operator/ceph/object/controller.go b/pkg/operator/ceph/object/controller.go index f4c8306aa3f4..1341cb8e9143 100644 --- a/pkg/operator/ceph/object/controller.go +++ b/pkg/operator/ceph/object/controller.go @@ -74,6 +74,9 @@ var controllerTypeMeta = metav1.TypeMeta{ APIVersion: fmt.Sprintf("%s/%s", cephv1.CustomResourceGroup, cephv1.Version), } +// allow this to be overridden for unit tests +var cephObjectStoreDependents = CephObjectStoreDependents + // ReconcileCephObjectStore reconciles a cephObjectStore object type ReconcileCephObjectStore struct { client client.Client @@ -200,7 +203,7 @@ func (r *ReconcileCephObjectStore) reconcile(request reconcile.Request) (reconci } // Make sure a CephCluster is present otherwise do nothing - cephCluster, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName) + cephCluster, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, request.NamespacedName, controllerName) if !isReadyToReconcile { // This handles the case where the Ceph Cluster is gone and we want to delete that CR // We skip the deleteStore() function since everything is gone already @@ -280,7 +283,7 @@ func (r *ReconcileCephObjectStore) reconcile(request reconcile.Request) (reconci return reconcile.Result{}, cephObjectStore, errors.Wrapf(err, "failed to check for object buckets. 
failed to get admin ops API context") } - deps, err := CephObjectStoreDependents(r.context, r.clusterInfo, cephObjectStore, objCtx, opsCtx) + deps, err := cephObjectStoreDependents(r.context, r.clusterInfo, cephObjectStore, objCtx, opsCtx) if err != nil { return reconcile.Result{}, cephObjectStore, err } @@ -382,6 +385,10 @@ func (r *ReconcileCephObjectStore) reconcileCreateObjectStore(cephObjectStore *c if err != nil { return r.setFailedStatus(namespacedName, "failed to reconcile external endpoint", err) } + + if err := UpdateEndpoint(objContext, &cephObjectStore.Spec); err != nil { + return r.setFailedStatus(namespacedName, "failed to set endpoint", err) + } } else { logger.Info("reconciling object store deployments") @@ -442,7 +449,10 @@ func (r *ReconcileCephObjectStore) reconcileCreateObjectStore(cephObjectStore *c // Start monitoring if !cephObjectStore.Spec.HealthCheck.Bucket.Disabled { - r.startMonitoring(cephObjectStore, objContext, namespacedName) + err = r.startMonitoring(cephObjectStore, objContext, namespacedName) + if err != nil { + return reconcile.Result{}, err + } } return reconcile.Result{}, nil @@ -507,22 +517,23 @@ func (r *ReconcileCephObjectStore) reconcileMultisiteCRs(cephObjectStore *cephv1 return cephObjectStore.Name, cephObjectStore.Name, cephObjectStore.Name, reconcile.Result{}, nil } -func (r *ReconcileCephObjectStore) startMonitoring(objectstore *cephv1.CephObjectStore, objContext *Context, namespacedName types.NamespacedName) { +func (r *ReconcileCephObjectStore) startMonitoring(objectstore *cephv1.CephObjectStore, objContext *Context, namespacedName types.NamespacedName) error { // Start monitoring object store if r.objectStoreChannels[objectstore.Name].monitoringRunning { - logger.Debug("external rgw endpoint monitoring go routine already running!") - return + logger.Info("external rgw endpoint monitoring go routine already running!") + return nil } rgwChecker, err := newBucketChecker(r.context, objContext, r.client, namespacedName, &objectstore.Spec) if err != nil { - logger.Error(err) - return + return errors.Wrapf(err, "failed to start rgw health checker for CephObjectStore %q, will re-reconcile", namespacedName.String()) } - logger.Info("starting rgw healthcheck") + logger.Infof("starting rgw health checker for CephObjectStore %q", namespacedName.String()) go rgwChecker.checkObjectStore(r.objectStoreChannels[objectstore.Name].stopChan) // Set the monitoring flag so we don't start more than one go routine r.objectStoreChannels[objectstore.Name].monitoringRunning = true + + return nil } diff --git a/pkg/operator/ceph/object/controller_test.go b/pkg/operator/ceph/object/controller_test.go index 25a0e3a86e0a..bb1c8c0e086c 100644 --- a/pkg/operator/ceph/object/controller_test.go +++ b/pkg/operator/ceph/object/controller_test.go @@ -19,17 +19,22 @@ package object import ( "context" + "net/http" "os" "testing" "time" "github.com/coreos/pkg/capnslog" + "github.com/pkg/errors" cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" rookclient "github.com/rook/rook/pkg/client/clientset/versioned/fake" + rookfake "github.com/rook/rook/pkg/client/clientset/versioned/fake" "github.com/rook/rook/pkg/client/clientset/versioned/scheme" "github.com/rook/rook/pkg/clusterd" + "github.com/rook/rook/pkg/daemon/ceph/client" "github.com/rook/rook/pkg/operator/k8sutil" "github.com/rook/rook/pkg/operator/test" + "github.com/rook/rook/pkg/util/dependents" exectest "github.com/rook/rook/pkg/util/exec/test" "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" @@ -281,59 
+286,72 @@ func TestCephObjectStoreController(t *testing.T) { capnslog.SetGlobalLogLevel(capnslog.DEBUG) os.Setenv("ROOK_LOG_LEVEL", "DEBUG") - // - // TEST 1 SETUP - // - // FAILURE because no CephCluster - // - // A Pool resource with metadata and spec. - objectStore := &cephv1.CephObjectStore{ - ObjectMeta: metav1.ObjectMeta{ - Name: store, - Namespace: namespace, - }, - Spec: cephv1.ObjectStoreSpec{}, - TypeMeta: controllerTypeMeta, - } - objectStore.Spec.Gateway.Port = 80 + commitConfigChangesOrig := commitConfigChanges + defer func() { commitConfigChanges = commitConfigChangesOrig }() - // Objects to track in the fake client. - object := []runtime.Object{ - objectStore, + // make sure joining multisite calls to commit config changes + calledCommitConfigChanges := false + commitConfigChanges = func(c *Context) error { + calledCommitConfigChanges = true + return nil } - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[0] == "status" { - return `{"fsid":"c47cac40-9bee-4d52-823b-ccd803ba5bfe","health":{"checks":{},"status":"HEALTH_ERR"},"pgmap":{"num_pgs":100,"pgs_by_state":[{"state_name":"active+clean","count":100}]}}`, nil - } - if args[0] == "versions" { - return dummyVersionsRaw, nil - } - return "", nil - }, - } - clientset := test.New(t, 3) - c := &clusterd.Context{ - Executor: executor, - RookClientset: rookclient.NewSimpleClientset(), - Clientset: clientset, - } + setupNewEnvironment := func(additionalObjects ...runtime.Object) *ReconcileCephObjectStore { + // reset var we use to check if we have called to commit config changes + calledCommitConfigChanges = false - // Register operator types with the runtime scheme. - s := scheme.Scheme - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephObjectStore{}) - s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephCluster{}) + // A Pool resource with metadata and spec. + objectStore := &cephv1.CephObjectStore{ + ObjectMeta: metav1.ObjectMeta{ + Name: store, + Namespace: namespace, + }, + Spec: cephv1.ObjectStoreSpec{}, + TypeMeta: controllerTypeMeta, + } + objectStore.Spec.Gateway.Port = 80 - // Create a fake client to mock API calls. - cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - // Create a ReconcileCephObjectStore object with the scheme and fake client. - r := &ReconcileCephObjectStore{ - client: cl, - scheme: s, - context: c, - objectStoreChannels: make(map[string]*objectStoreHealth), - recorder: k8sutil.NewEventReporter(record.NewFakeRecorder(5)), + // Objects to track in the fake client. + objects := []runtime.Object{ + objectStore, + } + + for i := range additionalObjects { + objects = append(objects, additionalObjects[i]) + } + + executor := &exectest.MockExecutor{ + MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { + if args[0] == "status" { + return `{"fsid":"c47cac40-9bee-4d52-823b-ccd803ba5bfe","health":{"checks":{},"status":"HEALTH_ERR"},"pgmap":{"num_pgs":100,"pgs_by_state":[{"state_name":"active+clean","count":100}]}}`, nil + } + return "", nil + }, + } + clientset := test.New(t, 3) + c := &clusterd.Context{ + Executor: executor, + RookClientset: rookclient.NewSimpleClientset(), + Clientset: clientset, + } + + // Register operator types with the runtime scheme. 
+ s := scheme.Scheme + s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephObjectStore{}) + s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephCluster{}) + + // Create a fake client to mock API calls. + cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(objects...).Build() + // Create a ReconcileCephObjectStore object with the scheme and fake client. + r := &ReconcileCephObjectStore{ + client: cl, + scheme: s, + context: c, + objectStoreChannels: make(map[string]*objectStoreHealth), + recorder: k8sutil.NewEventReporter(record.NewFakeRecorder(5)), + } + + return r } // Mock request to simulate Reconcile() being called on an event for a @@ -344,91 +362,86 @@ func TestCephObjectStoreController(t *testing.T) { Namespace: namespace, }, } - logger.Info("STARTING PHASE 1") - res, err := r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.True(t, res.Requeue) - logger.Info("PHASE 1 DONE") - - // - // TEST 2: - // - // FAILURE we have a cluster but it's not ready - // - cephCluster := &cephv1.CephCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - Namespace: namespace, - }, - Status: cephv1.ClusterStatus{ - Phase: "", - CephStatus: &cephv1.CephStatus{ - Health: "", + + t.Run("error - no ceph cluster", func(t *testing.T) { + r := setupNewEnvironment() + + res, err := r.Reconcile(ctx, req) + assert.NoError(t, err) + assert.True(t, res.Requeue) + assert.False(t, calledCommitConfigChanges) + }) + + t.Run("error - ceph cluster not ready", func(t *testing.T) { + cephCluster := &cephv1.CephCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + Namespace: namespace, }, - }, - } - object = append(object, cephCluster) - // Create a fake client to mock API calls. - cl = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() - // Create a ReconcileCephObjectStore object with the scheme and fake client. - r = &ReconcileCephObjectStore{ - client: cl, - scheme: s, - context: c, - objectStoreChannels: make(map[string]*objectStoreHealth), - recorder: k8sutil.NewEventReporter(record.NewFakeRecorder(5)), - } - logger.Info("STARTING PHASE 2") - res, err = r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.True(t, res.Requeue) - logger.Info("PHASE 2 DONE") + Status: cephv1.ClusterStatus{ + Phase: "", + CephStatus: &cephv1.CephStatus{ + Health: "", + }, + }, + } - // - // TEST 3: - // - // SUCCESS! 
The CephCluster is ready - // + r := setupNewEnvironment(cephCluster) - // Mock clusterInfo - secrets := map[string][]byte{ - "fsid": []byte(name), - "mon-secret": []byte("monsecret"), - "admin-secret": []byte("adminsecret"), - } - secret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "rook-ceph-mon", - Namespace: namespace, - }, - Data: secrets, - Type: k8sutil.RookType, - } - _, err = c.Clientset.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}) - assert.NoError(t, err) + res, err := r.Reconcile(ctx, req) + assert.NoError(t, err) + assert.True(t, res.Requeue) + assert.False(t, calledCommitConfigChanges) + }) - // Add ready status to the CephCluster - cephCluster.Status.Phase = k8sutil.ReadyStatus - cephCluster.Status.CephStatus.Health = "HEALTH_OK" + // set up an environment that has a ready ceph cluster, and return the reconciler for it + setupEnvironmentWithReadyCephCluster := func() *ReconcileCephObjectStore { + cephCluster := &cephv1.CephCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + Namespace: namespace, + }, + Status: cephv1.ClusterStatus{ + Phase: k8sutil.ReadyStatus, + CephStatus: &cephv1.CephStatus{ + Health: "HEALTH_OK", + }, + }, + } - // Create a fake client to mock API calls. - cl = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() + r := setupNewEnvironment(cephCluster) - // Override executor with the new ceph status and more content - executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[0] == "status" { - return `{"fsid":"c47cac40-9bee-4d52-823b-ccd803ba5bfe","health":{"checks":{},"status":"HEALTH_OK"},"pgmap":{"num_pgs":100,"pgs_by_state":[{"state_name":"active+clean","count":100}]}}`, nil - } - if args[0] == "auth" && args[1] == "get-or-create-key" { - return rgwCephAuthGetOrCreateKey, nil - } - if args[0] == "versions" { - return dummyVersionsRaw, nil - } - if args[0] == "osd" && args[1] == "lspools" { - // ceph actually outputs this all on one line, but this parses the same - return `[ + secrets := map[string][]byte{ + "fsid": []byte(name), + "mon-secret": []byte("monsecret"), + "admin-secret": []byte("adminsecret"), + } + secret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rook-ceph-mon", + Namespace: namespace, + }, + Data: secrets, + Type: k8sutil.RookType, + } + _, err := r.context.Clientset.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}) + assert.NoError(t, err) + + // Override executor with the new ceph status and more content + executor := &exectest.MockExecutor{ + MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { + if args[0] == "status" { + return `{"fsid":"c47cac40-9bee-4d52-823b-ccd803ba5bfe","health":{"checks":{},"status":"HEALTH_OK"},"pgmap":{"num_pgs":100,"pgs_by_state":[{"state_name":"active+clean","count":100}]}}`, nil + } + if args[0] == "auth" && args[1] == "get-or-create-key" { + return rgwCephAuthGetOrCreateKey, nil + } + if args[0] == "versions" { + return dummyVersionsRaw, nil + } + if args[0] == "osd" && args[1] == "lspools" { + // ceph actually outputs this all on one line, but this parses the same + return `[ {"poolnum":1,"poolname":"replicapool"}, {"poolnum":2,"poolname":"device_health_metrics"}, {"poolnum":3,"poolname":".rgw.root"}, @@ -439,49 +452,68 @@ func TestCephObjectStoreController(t *testing.T) { {"poolnum":8,"poolname":"my-store.rgw.meta"}, {"poolnum":9,"poolname":"my-store.rgw.buckets.data"} ]`, nil - } - return 
"", nil - }, - MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, args ...string) (string, error) { - if args[0] == "realm" && args[1] == "list" { - return realmListJSON, nil - } - if args[0] == "realm" && args[1] == "get" { - return realmGetJSON, nil - } - if args[0] == "zonegroup" && args[1] == "get" { - return zoneGroupGetJSON, nil - } - if args[0] == "zone" && args[1] == "get" { - return zoneGetJSON, nil - } - if args[0] == "user" { - return userCreateJSON, nil - } - return "", nil - }, - } - c.Executor = executor + } + return "", nil + }, + MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, args ...string) (string, error) { + if args[0] == "realm" && args[1] == "list" { + return realmListJSON, nil + } + if args[0] == "realm" && args[1] == "get" { + return realmGetJSON, nil + } + if args[0] == "zonegroup" && args[1] == "get" { + return zoneGroupGetJSON, nil + } + if args[0] == "zone" && args[1] == "get" { + return zoneGetJSON, nil + } + if args[0] == "user" { + return userCreateJSON, nil + } + return "", nil + }, + } + r.context.Executor = executor - // Create a ReconcileCephObjectStore object with the scheme and fake client. - r = &ReconcileCephObjectStore{ - client: cl, - scheme: s, - context: c, - objectStoreChannels: make(map[string]*objectStoreHealth), - recorder: k8sutil.NewEventReporter(record.NewFakeRecorder(5)), + return r } - logger.Info("STARTING PHASE 3") - res, err = r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.False(t, res.Requeue) - err = r.client.Get(context.TODO(), req.NamespacedName, objectStore) - assert.NoError(t, err) - assert.Equal(t, cephv1.ConditionProgressing, objectStore.Status.Phase, objectStore) - assert.NotEmpty(t, objectStore.Status.Info["endpoint"], objectStore) - assert.Equal(t, "http://rook-ceph-rgw-my-store.rook-ceph.svc:80", objectStore.Status.Info["endpoint"], objectStore) - logger.Info("PHASE 3 DONE") + t.Run("error - failed to start health checker", func(t *testing.T) { + r := setupEnvironmentWithReadyCephCluster() + + // cause a failure when creating the admin ops api for the health check + origHTTPClientFunc := genObjectStoreHTTPClientFunc + genObjectStoreHTTPClientFunc = func(objContext *Context, spec *cephv1.ObjectStoreSpec) (client *http.Client, tlsCert []byte, err error) { + return nil, []byte{}, errors.New("induced error creating admin ops API connection") + } + defer func() { genObjectStoreHTTPClientFunc = origHTTPClientFunc }() + + _, err := r.Reconcile(ctx, req) + assert.Error(t, err) + // we don't actually care if Requeue is true if there is an error assert.True(t, res.Requeue) + assert.Contains(t, err.Error(), "failed to start rgw health checker") + assert.Contains(t, err.Error(), "induced error creating admin ops API connection") + + // health checker should start up after committing config changes + assert.True(t, calledCommitConfigChanges) + }) + + t.Run("success - object store is running", func(t *testing.T) { + r := setupEnvironmentWithReadyCephCluster() + + res, err := r.Reconcile(ctx, req) + assert.NoError(t, err) + assert.False(t, res.Requeue) + + objectStore := &cephv1.CephObjectStore{} + err = r.client.Get(context.TODO(), req.NamespacedName, objectStore) + assert.NoError(t, err) + assert.Equal(t, cephv1.ConditionProgressing, objectStore.Status.Phase, objectStore) + assert.NotEmpty(t, objectStore.Status.Info["endpoint"], objectStore) + assert.Equal(t, "http://rook-ceph-rgw-my-store.rook-ceph.svc:80", objectStore.Status.Info["endpoint"], objectStore) + assert.True(t, 
calledCommitConfigChanges) + }) } func TestCephObjectStoreControllerMultisite(t *testing.T) { @@ -617,6 +649,16 @@ func TestCephObjectStoreControllerMultisite(t *testing.T) { }, } + commitConfigChangesOrig := commitConfigChanges + defer func() { commitConfigChanges = commitConfigChangesOrig }() + + // make sure joining multisite calls to commit config changes + calledCommitConfigChanges := false + commitConfigChanges = func(c *Context) error { + calledCommitConfigChanges = true + return nil + } + clientset := test.New(t, 3) c := &clusterd.Context{ Executor: executor, @@ -649,9 +691,46 @@ func TestCephObjectStoreControllerMultisite(t *testing.T) { }, } - res, err := r.Reconcile(ctx, req) - assert.NoError(t, err) - assert.False(t, res.Requeue) - err = r.client.Get(context.TODO(), req.NamespacedName, objectStore) - assert.NoError(t, err) + t.Run("create an object store", func(t *testing.T) { + res, err := r.Reconcile(ctx, req) + assert.NoError(t, err) + assert.False(t, res.Requeue) + assert.True(t, calledCommitConfigChanges) + err = r.client.Get(ctx, req.NamespacedName, objectStore) + assert.NoError(t, err) + }) + + t.Run("delete the same store", func(t *testing.T) { + calledCommitConfigChanges = false + + // no dependents + dependentsChecked := false + cephObjectStoreDependentsOrig := cephObjectStoreDependents + defer func() { cephObjectStoreDependents = cephObjectStoreDependentsOrig }() + cephObjectStoreDependents = func(clusterdCtx *clusterd.Context, clusterInfo *client.ClusterInfo, store *cephv1.CephObjectStore, objCtx *Context, opsCtx *AdminOpsContext) (*dependents.DependentList, error) { + dependentsChecked = true + return &dependents.DependentList{}, nil + } + + err = r.client.Get(ctx, req.NamespacedName, objectStore) + assert.NoError(t, err) + objectStore.DeletionTimestamp = &metav1.Time{ + Time: time.Now(), + } + err = r.client.Update(ctx, objectStore) + + // have to also track the same objects in the rook clientset + r.context.RookClientset = rookfake.NewSimpleClientset( + objectRealm, + objectZoneGroup, + objectZone, + objectStore, + ) + + res, err := r.Reconcile(ctx, req) + assert.NoError(t, err) + assert.False(t, res.Requeue) + assert.True(t, dependentsChecked) + assert.True(t, calledCommitConfigChanges) + }) } diff --git a/pkg/operator/ceph/object/health.go b/pkg/operator/ceph/object/health.go index 00f36dea8be4..1faa7c208a38 100644 --- a/pkg/operator/ceph/object/health.go +++ b/pkg/operator/ceph/object/health.go @@ -159,14 +159,13 @@ func (c *bucketChecker) checkObjectStoreHealth() error { } // Set access and secret key - tlsCert := c.objContext.TlsCert s3endpoint := c.objContext.Endpoint s3AccessKey := user.Keys[0].AccessKey s3SecretKey := user.Keys[0].SecretKey // Initiate s3 agent logger.Debugf("initializing s3 connection for object store %q", c.namespacedName.Name) - s3client, err := NewS3Agent(s3AccessKey, s3SecretKey, s3endpoint, false, tlsCert) + s3client, err := NewInsecureS3Agent(s3AccessKey, s3SecretKey, s3endpoint, "", false) if err != nil { return errors.Wrap(err, "failed to initialize s3 connection") } diff --git a/pkg/operator/ceph/object/objectstore.go b/pkg/operator/ceph/object/objectstore.go index 22819afe9e90..ec0c2f1444f2 100644 --- a/pkg/operator/ceph/object/objectstore.go +++ b/pkg/operator/ceph/object/objectstore.go @@ -39,6 +39,7 @@ import ( v1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" ) const ( @@ -86,6 +87,9 @@ type realmType struct { Realms 
[]string `json:"realms"` } +// allow commitConfigChanges to be overridden for unit testing +var commitConfigChanges = CommitConfigChanges + func deleteRealmAndPools(objContext *Context, spec cephv1.ObjectStoreSpec) error { if spec.IsMultisite() { // since pools for object store are created by the zone, the object store only needs to be removed from the zone @@ -146,12 +150,11 @@ func removeObjectStoreFromMultisite(objContext *Context, spec cephv1.ObjectStore logger.Infof("WARNING: No other zone in realm %q can commit to the period or pull the realm until you create another object-store in zone %q", objContext.Realm, objContext.Zone) } - // the period will help notify other zones of changes if there are multi-zones - _, err = runAdminCommand(objContext, false, "period", "update", "--commit") - if err != nil { - return errors.Wrap(err, "failed to update period after removing an endpoint from the zone") + // this will notify other zones of changes if there are multi-zones + if err := commitConfigChanges(objContext); err != nil { + nsName := fmt.Sprintf("%s/%s", objContext.clusterInfo.Namespace, objContext.Name) + return errors.Wrapf(err, "failed to commit config changes after removing CephObjectStore %q from multi-site", nsName) } - logger.Infof("successfully updated period for realm %v after removal of object-store %v", objContext.Realm, objContext.Name) return nil } @@ -362,20 +365,18 @@ func createMultisite(objContext *Context, endpointArg string) error { realmArg := fmt.Sprintf("--rgw-realm=%s", objContext.Realm) zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", objContext.ZoneGroup) - updatePeriod := false // create the realm if it doesn't exist yet output, err := RunAdminCommandNoMultisite(objContext, true, "realm", "get", realmArg) if err != nil { // ENOENT means “No such file or directory” if code, err := exec.ExtractExitCode(err); err == nil && code == int(syscall.ENOENT) { - updatePeriod = true output, err = RunAdminCommandNoMultisite(objContext, false, "realm", "create", realmArg) if err != nil { return errorOrIsNotFound(err, "failed to create ceph realm %q, for reason %q", objContext.ZoneGroup, output) } - logger.Debugf("created realm %v", objContext.Realm) + logger.Debugf("created realm %q", objContext.Realm) } else { - return errorOrIsNotFound(err, "radosgw-admin realm get failed with code %d, for reason %q. %v", strconv.Itoa(code), output, string(kerrors.ReasonForError(err))) + return errorOrIsNotFound(err, "'radosgw-admin realm get' failed with code %d, for reason %q. 
%v", strconv.Itoa(code), output, string(kerrors.ReasonForError(err))) } } @@ -384,14 +385,13 @@ func createMultisite(objContext *Context, endpointArg string) error { if err != nil { // ENOENT means “No such file or directory” if code, err := exec.ExtractExitCode(err); err == nil && code == int(syscall.ENOENT) { - updatePeriod = true output, err = RunAdminCommandNoMultisite(objContext, false, "zonegroup", "create", "--master", realmArg, zoneGroupArg, endpointArg) if err != nil { return errorOrIsNotFound(err, "failed to create ceph zone group %q, for reason %q", objContext.ZoneGroup, output) } - logger.Debugf("created zone group %v", objContext.ZoneGroup) + logger.Debugf("created zone group %q", objContext.ZoneGroup) } else { - return errorOrIsNotFound(err, "radosgw-admin zonegroup get failed with code %d, for reason %q", strconv.Itoa(code), output) + return errorOrIsNotFound(err, "'radosgw-admin zonegroup get' failed with code %d, for reason %q", strconv.Itoa(code), output) } } @@ -400,24 +400,19 @@ func createMultisite(objContext *Context, endpointArg string) error { if err != nil { // ENOENT means “No such file or directory” if code, err := exec.ExtractExitCode(err); err == nil && code == int(syscall.ENOENT) { - updatePeriod = true output, err = runAdminCommand(objContext, false, "zone", "create", "--master", endpointArg) if err != nil { return errorOrIsNotFound(err, "failed to create ceph zone %q, for reason %q", objContext.Zone, output) } - logger.Debugf("created zone %v", objContext.Zone) + logger.Debugf("created zone %q", objContext.Zone) } else { - return errorOrIsNotFound(err, "radosgw-admin zone get failed with code %d, for reason %q", strconv.Itoa(code), output) + return errorOrIsNotFound(err, "'radosgw-admin zone get' failed with code %d, for reason %q", strconv.Itoa(code), output) } } - if updatePeriod { - // the period will help notify other zones of changes if there are multi-zones - _, err := runAdminCommand(objContext, false, "period", "update", "--commit") - if err != nil { - return errorOrIsNotFound(err, "failed to update period") - } - logger.Debugf("updated period for realm %v", objContext.Realm) + if err := commitConfigChanges(objContext); err != nil { + nsName := fmt.Sprintf("%s/%s", objContext.clusterInfo.Namespace, objContext.Name) + return errors.Wrapf(err, "failed to commit config changes after creating multisite config for CephObjectStore %q", nsName) } logger.Infof("Multisite for object-store: realm=%s, zonegroup=%s, zone=%s", objContext.Realm, objContext.ZoneGroup, objContext.Zone) @@ -457,11 +452,11 @@ func joinMultisite(objContext *Context, endpointArg, zoneEndpoints, namespace st } logger.Debugf("endpoints for zone %q are now %q", objContext.Zone, zoneEndpoints) - // the period will help notify other zones of changes if there are multi-zones - _, err = RunAdminCommandNoMultisite(objContext, false, "period", "update", "--commit", realmArg, zoneGroupArg, zoneArg) - if err != nil { - return errorOrIsNotFound(err, "failed to update period") + if err := commitConfigChanges(objContext); err != nil { + nsName := fmt.Sprintf("%s/%s", objContext.clusterInfo.Namespace, objContext.Name) + return errors.Wrapf(err, "failed to commit config changes for CephObjectStore %q when joining multisite ", nsName) } + logger.Infof("added object store %q to realm %q, zonegroup %q, zone %q", objContext.Name, objContext.Realm, objContext.ZoneGroup, objContext.Zone) // create system user for realm for master zone in master zonegorup for multisite scenario @@ -482,7 +477,7 @@ func 
createSystemUser(objContext *Context, namespace string) error { zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", objContext.ZoneGroup) zoneArg := fmt.Sprintf("--rgw-zone=%s", objContext.Zone) - output, err := RunAdminCommandNoMultisite(objContext, false, "user", "info", uidArg) + output, err := RunAdminCommandNoMultisite(objContext, false, "user", "info", uidArg, realmArg, zoneGroupArg, zoneArg) if err == nil { logger.Debugf("realm system user %q has already been created", uid) return nil @@ -685,14 +680,14 @@ func missingPools(context *Context) ([]string, error) { if err != nil { return []string{}, errors.Wrapf(err, "failed to determine if pools are missing. failed to list pools") } - existingPools := util.NewSet() + existingPools := sets.NewString() for _, summary := range existingPoolSummaries { - existingPools.Add(summary.Name) + existingPools.Insert(summary.Name) } missingPools := []string{} for _, objPool := range allObjectPools(context.Name) { - if !existingPools.Contains(objPool) { + if !existingPools.Has(objPool) { missingPools = append(missingPools, objPool) } } @@ -875,7 +870,7 @@ func enableRGWDashboard(context *Context) error { // for latest Ceph versions if mgr.FileBasedPasswordSupported(context.clusterInfo) { - accessFile, err := mgr.CreateTempPasswordFile(*u.AccessKey) + accessFile, err := util.CreateTempFile(*u.AccessKey) if err != nil { return errors.Wrap(err, "failed to create a temporary dashboard access-key file") } @@ -887,7 +882,7 @@ func enableRGWDashboard(context *Context) error { } }() - secretFile, err = mgr.CreateTempPasswordFile(*u.SecretKey) + secretFile, err = util.CreateTempFile(*u.SecretKey) if err != nil { return errors.Wrap(err, "failed to create a temporary dashboard secret-key file") } @@ -911,7 +906,7 @@ func enableRGWDashboard(context *Context) error { // starting in ceph v15.2.8. We run it in a goroutine until the fix // is found. We expect the ceph command to timeout so at least the goroutine exits. logger.Info("setting the dashboard api secret key") - _, err = cephCmd.RunWithTimeout(exec.CephCommandTimeout) + _, err = cephCmd.RunWithTimeout(exec.CephCommandsTimeout) if err != nil { logger.Errorf("failed to set user %q secretkey. %v", DashboardUser, err) } @@ -943,14 +938,14 @@ func disableRGWDashboard(context *Context) { args := []string{"dashboard", "reset-rgw-api-access-key"} cephCmd := cephclient.NewCephCommand(context.Context, context.clusterInfo, args) - _, err = cephCmd.RunWithTimeout(exec.CephCommandTimeout) + _, err = cephCmd.RunWithTimeout(exec.CephCommandsTimeout) if err != nil { logger.Warningf("failed to reset user accesskey for user %q. %v", DashboardUser, err) } args = []string{"dashboard", "reset-rgw-api-secret-key"} cephCmd = cephclient.NewCephCommand(context.Context, context.clusterInfo, args) - _, err = cephCmd.RunWithTimeout(exec.CephCommandTimeout) + _, err = cephCmd.RunWithTimeout(exec.CephCommandsTimeout) if err != nil { logger.Warningf("failed to reset user secretkey for user %q. 
%v", DashboardUser, err) } diff --git a/pkg/operator/ceph/object/objectstore_test.go b/pkg/operator/ceph/object/objectstore_test.go index 4c79be443042..3b9e3a0f6781 100644 --- a/pkg/operator/ceph/object/objectstore_test.go +++ b/pkg/operator/ceph/object/objectstore_test.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "os" + "syscall" "testing" "time" @@ -57,7 +58,14 @@ const ( "system": "true", "temp_url_keys": [], "type": "rgw", - "mfa_ids": [] + "mfa_ids": [], + "user_quota": { + "enabled": false, + "check_on_raw": false, + "max_size": -1, + "max_size_kb": 0, + "max_objects": -1 + } }` access_key = "VFKF8SSU9L3L2UR03Z8C" ) @@ -297,3 +305,261 @@ func TestDashboard(t *testing.T) { assert.True(t, checkdashboard) disableRGWDashboard(objContext) } + +// import TestMockExecHelperProcess +func TestMockExecHelperProcess(t *testing.T) { + exectest.TestMockExecHelperProcess(t) +} + +func Test_createMultisite(t *testing.T) { + // control the return values from calling get/create/update on resources + type commandReturns struct { + realmExists bool + zoneGroupExists bool + zoneExists bool + failCreateRealm bool + failCreateZoneGroup bool + failCreateZone bool + failCommitConfigChanges bool + } + + // control whether we should expect certain 'get' calls + type expectCommands struct { + getRealm bool + createRealm bool + getZoneGroup bool + createZoneGroup bool + getZone bool + createZone bool + commitConfigChanges bool + } + + // vars used for testing if calls were made + var ( + calledGetRealm = false + calledGetZoneGroup = false + calledGetZone = false + calledCreateRealm = false + calledCreateZoneGroup = false + calledCreateZone = false + calledCommitConfigChanges = false + ) + + commitConfigChangesOrig := commitConfigChanges + defer func() { commitConfigChanges = commitConfigChangesOrig }() + + enoentIfNotExist := func(resourceExists bool) (string, error) { + if !resourceExists { + return "", exectest.MockExecCommandReturns(t, "", "", int(syscall.ENOENT)) + } + return "{}", nil // get wants json, and {} is the most basic json + } + + errorIfFail := func(shouldFail bool) (string, error) { + if shouldFail { + return "", exectest.MockExecCommandReturns(t, "", "basic error", 1) + } + return "", nil + } + + setupTest := func(env commandReturns) *exectest.MockExecutor { + // reset output testing vars + calledGetRealm = false + calledCreateRealm = false + calledGetZoneGroup = false + calledCreateZoneGroup = false + calledGetZone = false + calledCreateZone = false + calledCommitConfigChanges = false + + commitConfigChanges = func(c *Context) error { + calledCommitConfigChanges = true + if env.failCommitConfigChanges { + return errors.New("fake error from CommitConfigChanges") + } + return nil + } + + return &exectest.MockExecutor{ + MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, arg ...string) (string, error) { + if command == "radosgw-admin" { + switch arg[0] { + case "realm": + switch arg[1] { + case "get": + calledGetRealm = true + return enoentIfNotExist(env.realmExists) + case "create": + calledCreateRealm = true + return errorIfFail(env.failCreateRealm) + } + case "zonegroup": + switch arg[1] { + case "get": + calledGetZoneGroup = true + return enoentIfNotExist(env.zoneGroupExists) + case "create": + calledCreateZoneGroup = true + return errorIfFail(env.failCreateZoneGroup) + } + case "zone": + switch arg[1] { + case "get": + calledGetZone = true + return enoentIfNotExist(env.zoneExists) + case "create": + calledCreateZone = true + return errorIfFail(env.failCreateZone) + 
} + } + } + t.Fatalf("unhandled command: %s %v", command, arg) + panic("unhandled command") + }, + } + } + + expectNoErr := false // want no error + expectErr := true // want an error + + tests := []struct { + name string + commandReturns commandReturns + expectCommands expectCommands + wantErr bool + }{ + {"create realm, zonegroup, and zone; commit config", + commandReturns{ + // nothing exists, and all should succeed + }, + expectCommands{ + getRealm: true, + createRealm: true, + getZoneGroup: true, + createZoneGroup: true, + getZone: true, + createZone: true, + commitConfigChanges: true, + }, + expectNoErr}, + {"fail creating realm", + commandReturns{ + failCreateRealm: true, + }, + expectCommands{ + getRealm: true, + createRealm: true, + // when we fail to create realm, we should not continue + }, + expectErr}, + {"fail creating zonegroup", + commandReturns{ + failCreateZoneGroup: true, + }, + expectCommands{ + getRealm: true, + createRealm: true, + getZoneGroup: true, + createZoneGroup: true, + // when we fail to create zonegroup, we should not continue + }, + expectErr}, + {"fail creating zone", + commandReturns{ + failCreateZone: true, + }, + expectCommands{ + getRealm: true, + createRealm: true, + getZoneGroup: true, + createZoneGroup: true, + getZone: true, + createZone: true, + // when we fail to create zone, we should not continue + }, + expectErr}, + {"fail commit config", + commandReturns{ + failCommitConfigChanges: true, + }, + expectCommands{ + getRealm: true, + createRealm: true, + getZoneGroup: true, + createZoneGroup: true, + getZone: true, + createZone: true, + commitConfigChanges: true, + }, + expectErr}, + {"realm exists; create zonegroup and zone; commit config", + commandReturns{ + realmExists: true, + }, + expectCommands{ + getRealm: true, + createRealm: false, + getZoneGroup: true, + createZoneGroup: true, + getZone: true, + createZone: true, + commitConfigChanges: true, + }, + expectNoErr}, + {"realm and zonegroup exist; create zone; commit config", + commandReturns{ + realmExists: true, + zoneGroupExists: true, + }, + expectCommands{ + getRealm: true, + createRealm: false, + getZoneGroup: true, + createZoneGroup: false, + getZone: true, + createZone: true, + commitConfigChanges: true, + }, + expectNoErr}, + {"realm, zonegroup, and zone exist; commit config", + commandReturns{ + realmExists: true, + zoneGroupExists: true, + zoneExists: true, + }, + expectCommands{ + getRealm: true, + createRealm: false, + getZoneGroup: true, + createZoneGroup: false, + getZone: true, + createZone: false, + commitConfigChanges: true, + }, + expectNoErr}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + executor := setupTest(tt.commandReturns) + ctx := &clusterd.Context{ + Executor: executor, + } + objContext := NewContext(ctx, &client.ClusterInfo{Namespace: "my-cluster"}, "my-store") + + // assumption: endpointArg is sufficiently tested by integration tests + err := createMultisite(objContext, "") + assert.Equal(t, tt.expectCommands.getRealm, calledGetRealm) + assert.Equal(t, tt.expectCommands.createRealm, calledCreateRealm) + assert.Equal(t, tt.expectCommands.getZoneGroup, calledGetZoneGroup) + assert.Equal(t, tt.expectCommands.createZoneGroup, calledCreateZoneGroup) + assert.Equal(t, tt.expectCommands.getZone, calledGetZone) + assert.Equal(t, tt.expectCommands.createZone, calledCreateZone) + assert.Equal(t, tt.expectCommands.commitConfigChanges, calledCommitConfigChanges) + if tt.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) 
+ } +} diff --git a/pkg/operator/ceph/object/realm/controller.go b/pkg/operator/ceph/object/realm/controller.go index 6cb5e6a22e09..0b4c6fe43cfe 100644 --- a/pkg/operator/ceph/object/realm/controller.go +++ b/pkg/operator/ceph/object/realm/controller.go @@ -149,7 +149,7 @@ func (r *ReconcileObjectRealm) reconcile(request reconcile.Request) (reconcile.R } // Make sure a CephCluster is present otherwise do nothing - _, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName) + _, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, request.NamespacedName, controllerName) if !isReadyToReconcile { // This handles the case where the Ceph Cluster is gone and we want to delete that CR if !cephObjectRealm.GetDeletionTimestamp().IsZero() && !cephClusterExists { diff --git a/pkg/operator/ceph/object/rgw.go b/pkg/operator/ceph/object/rgw.go index 10adfa279b0d..462f0aaafebc 100644 --- a/pkg/operator/ceph/object/rgw.go +++ b/pkg/operator/ceph/object/rgw.go @@ -23,6 +23,7 @@ import ( "io/ioutil" "net/http" "reflect" + "strconv" "syscall" "github.com/banzaicloud/k8s-objectmatcher/patch" @@ -62,6 +63,10 @@ type rgwConfig struct { var updateDeploymentAndWait = mon.UpdateCephDeploymentAndWait +var ( + insecureSkipVerify = "insecureSkipVerify" +) + func (c *clusterConfig) createOrUpdateStore(realmName, zoneGroupName, zoneName string) error { logger.Infof("creating object store %q in namespace %q", c.store.Name, c.store.Namespace) @@ -124,7 +129,7 @@ func (c *clusterConfig) startRGWPods(realmName, zoneGroupName, zoneName string) // Unfortunately, on upgrade we would not set the flags which is not ideal for old clusters where we were no setting those flags // The KV supports setting those flags even if the RGW is running logger.Info("setting rgw config flags") - err = c.setDefaultFlagsMonConfigStore(rgwConfig.ResourceName) + err = c.setDefaultFlagsMonConfigStore(rgwConfig) if err != nil { // Getting EPERM typically happens when the flag may not be modified at runtime // This is fine to ignore @@ -137,9 +142,9 @@ func (c *clusterConfig) startRGWPods(realmName, zoneGroupName, zoneName string) // Create deployment deployment, err := c.createDeployment(rgwConfig) if err != nil { - return nil + return errors.Wrap(err, "failed to create rgw deployment") } - logger.Infof("object store %q deployment %q started", c.store.Name, deployment.Name) + logger.Infof("object store %q deployment %q created", c.store.Name, deployment.Name) // Set owner ref to cephObjectStore object err = c.ownerInfo.SetControllerReference(deployment) @@ -322,7 +327,8 @@ func BuildDNSEndpoint(domainName string, port int32, secure bool) string { } // GetTLSCACert fetch cacert for internal RGW requests -func GetTlsCaCert(objContext *Context, objectStoreSpec *cephv1.ObjectStoreSpec) ([]byte, error) { +func GetTlsCaCert(objContext *Context, objectStoreSpec *cephv1.ObjectStoreSpec) ([]byte, bool, error) { + var insecureTLS, ok bool ctx := context.TODO() var ( tlsCert []byte @@ -332,34 +338,54 @@ func GetTlsCaCert(objContext *Context, objectStoreSpec *cephv1.ObjectStoreSpec) if objectStoreSpec.Gateway.SSLCertificateRef != "" { tlsSecretCert, err := objContext.Context.Clientset.CoreV1().Secrets(objContext.clusterInfo.Namespace).Get(ctx, objectStoreSpec.Gateway.SSLCertificateRef, metav1.GetOptions{}) if err != nil { - return nil, errors.Wrapf(err, "failed to get secret %s containing TLS certificate defined in 
%s", objectStoreSpec.Gateway.SSLCertificateRef, objContext.Name) + return nil, false, errors.Wrapf(err, "failed to get secret %q containing TLS certificate defined in %q", objectStoreSpec.Gateway.SSLCertificateRef, objContext.Name) } if tlsSecretCert.Type == v1.SecretTypeOpaque { - tlsCert = tlsSecretCert.Data[certKeyName] + tlsCert, ok = tlsSecretCert.Data[certKeyName] + if !ok { + return nil, false, errors.Errorf("failed to get TLS certificate from secret, token is %q but key %q does not exist", v1.SecretTypeOpaque, certKeyName) + } } else if tlsSecretCert.Type == v1.SecretTypeTLS { - tlsCert = tlsSecretCert.Data[v1.TLSCertKey] + tlsCert, ok = tlsSecretCert.Data[v1.TLSCertKey] + if !ok { + return nil, false, errors.Errorf("failed to get TLS certificate from secret, token is %q but key %q does not exist", v1.SecretTypeTLS, v1.TLSCertKey) + } + } else { + return nil, false, errors.Errorf("failed to get TLS certificate from secret, unknown secret type %q", tlsSecretCert.Type) + } + // If the secret contains an indication that the TLS connection should be insecure, then + // let's apply it to the client. + insecureTLSStr, ok := tlsSecretCert.Data[insecureSkipVerify] + if ok { + insecureTLS, err = strconv.ParseBool(string(insecureTLSStr)) + if err != nil { + return nil, false, errors.Wrap(err, "failed to parse insecure tls bool option") + } } } else if objectStoreSpec.GetServiceServingCert() != "" { tlsCert, err = ioutil.ReadFile(ServiceServingCertCAFile) if err != nil { - return nil, errors.Wrapf(err, "failed to fetch TLS certificate from %q", ServiceServingCertCAFile) + return nil, false, errors.Wrapf(err, "failed to fetch TLS certificate from %q", ServiceServingCertCAFile) } } - return tlsCert, nil + return tlsCert, insecureTLS, nil } -func GenObjectStoreHTTPClient(objContext *Context, spec *cephv1.ObjectStoreSpec) (*http.Client, []byte, error) { +// Allow overriding this function for unit tests to mock the admin ops api +var genObjectStoreHTTPClientFunc = genObjectStoreHTTPClient + +func genObjectStoreHTTPClient(objContext *Context, spec *cephv1.ObjectStoreSpec) (*http.Client, []byte, error) { nsName := fmt.Sprintf("%s/%s", objContext.clusterInfo.Namespace, objContext.Name) c := &http.Client{} tlsCert := []byte{} if spec.IsTLSEnabled() { var err error - tlsCert, err = GetTlsCaCert(objContext, spec) + tlsCert, insecureTLS, err := GetTlsCaCert(objContext, spec) if err != nil { return nil, tlsCert, errors.Wrapf(err, "failed to fetch CA cert to establish TLS connection with object store %q", nsName) } - c.Transport = BuildTransportTLS(tlsCert) + c.Transport = BuildTransportTLS(tlsCert, insecureTLS) } return c, tlsCert, nil } diff --git a/pkg/operator/ceph/object/rgw_test.go b/pkg/operator/ceph/object/rgw_test.go index 0e0f45f752ab..3d9937304997 100644 --- a/pkg/operator/ceph/object/rgw_test.go +++ b/pkg/operator/ceph/object/rgw_test.go @@ -25,14 +25,14 @@ import ( cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" "github.com/rook/rook/pkg/client/clientset/versioned/scheme" "github.com/rook/rook/pkg/clusterd" - - cephclient "github.com/rook/rook/pkg/daemon/ceph/client" + "github.com/rook/rook/pkg/daemon/ceph/client" clienttest "github.com/rook/rook/pkg/daemon/ceph/client/test" "github.com/rook/rook/pkg/operator/ceph/config" "github.com/rook/rook/pkg/operator/k8sutil" - testop "github.com/rook/rook/pkg/operator/test" + "github.com/rook/rook/pkg/operator/test" exectest "github.com/rook/rook/pkg/util/exec/test" "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" fclient "k8s.io/client-go/kubernetes/fake" @@ -41,7 +41,7 @@ import ( func TestStartRGW(t *testing.T) { ctx := context.TODO() - clientset := testop.New(t, 3) + clientset := test.New(t, 3) executor := &exectest.MockExecutor{ MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { if args[0] == "auth" && args[1] == "get-or-create-key" { @@ -66,7 +66,7 @@ func TestStartRGW(t *testing.T) { r := &ReconcileCephObjectStore{client: cl, scheme: s} // start a basic cluster - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() + ownerInfo := client.NewMinimumOwnerInfoWithOwnerRef() c := &clusterConfig{context, info, store, version, &cephv1.ClusterSpec{}, ownerInfo, data, r.client} err := c.startRGWPods(store.Name, store.Name, store.Name) assert.Nil(t, err) @@ -102,7 +102,7 @@ func TestCreateObjectStore(t *testing.T) { } store := simpleStore() - clientset := testop.New(t, 3) + clientset := test.New(t, 3) context := &clusterd.Context{Executor: executor, Clientset: clientset} info := clienttest.CreateTestClusterInfo(1) data := config.NewStatelessDaemonDataPathMap(config.RgwType, "my-fs", "rook-ceph", "/var/lib/rook/") @@ -112,7 +112,7 @@ func TestCreateObjectStore(t *testing.T) { object := []runtime.Object{&cephv1.CephObjectStore{}} cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() r := &ReconcileCephObjectStore{client: cl, scheme: s} - ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef() + ownerInfo := client.NewMinimumOwnerInfoWithOwnerRef() c := &clusterConfig{context, info, store, "1.2.3.4", &cephv1.ClusterSpec{}, ownerInfo, data, r.client} err := c.createOrUpdateStore(store.Name, store.Name, store.Name) assert.Nil(t, err) @@ -134,7 +134,7 @@ func TestGenerateSecretName(t *testing.T) { // start a basic cluster c := &clusterConfig{&clusterd.Context{}, - &cephclient.ClusterInfo{}, + &client.ClusterInfo{}, &cephv1.CephObjectStore{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "mycluster"}}, "v1.1.0", &cephv1.ClusterSpec{}, @@ -174,3 +174,96 @@ func TestBuildDomainNameAndEndpoint(t *testing.T) { ep = BuildDNSEndpoint(dns, securePort, true) assert.Equal(t, "https://rook-ceph-rgw-my-store.rook-ceph.svc:443", ep) } + +func TestGetTlsCaCert(t *testing.T) { + objContext := &Context{ + Context: &clusterd.Context{ + Clientset: test.New(t, 3), + }, + clusterInfo: client.AdminClusterInfo("rook-ceph"), + } + objectStore := simpleStore() + + t.Run("no gateway cert ref", func(t *testing.T) { + tlsCert, insesure, err := GetTlsCaCert(objContext, &objectStore.Spec) + assert.NoError(t, err) + assert.False(t, insesure) + assert.Nil(t, tlsCert) + }) + + t.Run("gateway cert ref but secret no found", func(t *testing.T) { + objectStore.Spec.Gateway.SSLCertificateRef = "my-secret" + tlsCert, insesure, err := GetTlsCaCert(objContext, &objectStore.Spec) + assert.Error(t, err) + assert.False(t, insesure) + assert.Nil(t, tlsCert) + }) + + t.Run("gateway cert ref and secret found but no key and wrong type", func(t *testing.T) { + s := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-secret", + Namespace: "rook-ceph", + }, + Type: "Yolo", + } + _, err := objContext.Context.Clientset.CoreV1().Secrets(objContext.clusterInfo.Namespace).Create(context.TODO(), s, metav1.CreateOptions{}) + assert.NoError(t, err) + objectStore.Spec.Gateway.SSLCertificateRef = "my-secret" + tlsCert, insesure, err := GetTlsCaCert(objContext, &objectStore.Spec) + assert.Error(t, err) + 
assert.EqualError(t, err, "failed to get TLS certificate from secret, unknown secret type \"Yolo\"") + assert.False(t, insesure) + assert.Nil(t, tlsCert) + err = objContext.Context.Clientset.CoreV1().Secrets(objContext.clusterInfo.Namespace).Delete(context.TODO(), s.Name, metav1.DeleteOptions{}) + assert.NoError(t, err) + }) + + t.Run("gateway cert ref and Opaque secret found and no key is present", func(t *testing.T) { + s := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-secret", + Namespace: "rook-ceph", + }, + Type: "Opaque", + } + _, err := objContext.Context.Clientset.CoreV1().Secrets(objContext.clusterInfo.Namespace).Create(context.TODO(), s, metav1.CreateOptions{}) + assert.NoError(t, err) + objectStore.Spec.Gateway.SSLCertificateRef = "my-secret" + tlsCert, insesure, err := GetTlsCaCert(objContext, &objectStore.Spec) + assert.Error(t, err) + assert.EqualError(t, err, "failed to get TLS certificate from secret, token is \"Opaque\" but key \"cert\" does not exist") + assert.False(t, insesure) + assert.Nil(t, tlsCert) + err = objContext.Context.Clientset.CoreV1().Secrets(objContext.clusterInfo.Namespace).Delete(context.TODO(), s.Name, metav1.DeleteOptions{}) + assert.NoError(t, err) + }) + + t.Run("gateway cert ref and Opaque secret found and key is present", func(t *testing.T) { + s := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-secret", + Namespace: "rook-ceph", + }, + Data: map[string][]byte{"cert": []byte(`-----BEGIN CERTIFICATE----- +MIIBJTCB0AIJAPNFNz1CNlDOMA0GCSqGSIb3DQEBCwUAMBoxCzAJBgNVBAYTAkZS +MQswCQYDVQQIDAJGUjAeFw0yMTA5MzAwODAzNDBaFw0yNDA2MjYwODAzNDBaMBox +CzAJBgNVBAYTAkZSMQswCQYDVQQIDAJGUjBcMA0GCSqGSIb3DQEBAQUAA0sAMEgC +QQDHeZ47hVBcryl6SCghM8Zj3Q6DQzJzno1J7EjPXef5m+pIVAEylS9sQuwKtFZc +vv3qS/OVFExmMdbrvfKEIfbBAgMBAAEwDQYJKoZIhvcNAQELBQADQQAAnflLuUM3 +4Dq0v7If4cgae2mr7jj3U/lIpHVtFbF7kVjC/eqmeN1a9u0UbRHKkUr+X1mVX3rJ +BvjQDN6didwQ +-----END CERTIFICATE-----`)}, + Type: "Opaque", + } + _, err := objContext.Context.Clientset.CoreV1().Secrets(objContext.clusterInfo.Namespace).Create(context.TODO(), s, metav1.CreateOptions{}) + assert.NoError(t, err) + objectStore.Spec.Gateway.SSLCertificateRef = "my-secret" + tlsCert, insesure, err := GetTlsCaCert(objContext, &objectStore.Spec) + assert.NoError(t, err) + assert.False(t, insesure) + assert.NotNil(t, tlsCert) + err = objContext.Context.Clientset.CoreV1().Secrets(objContext.clusterInfo.Namespace).Delete(context.TODO(), s.Name, metav1.DeleteOptions{}) + assert.NoError(t, err) + }) +} diff --git a/pkg/operator/ceph/object/s3-handlers.go b/pkg/operator/ceph/object/s3-handlers.go index 98701f7340fa..a76d40879ee7 100644 --- a/pkg/operator/ceph/object/s3-handlers.go +++ b/pkg/operator/ceph/object/s3-handlers.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2018 The Rook Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -36,16 +36,19 @@ type S3Agent struct { Client *s3.S3 } -func NewS3Agent(accessKey, secretKey, endpoint string, debug bool, tlsCert []byte) (*S3Agent, error) { - return newS3Agent(accessKey, secretKey, endpoint, debug, tlsCert, false) +func NewS3Agent(accessKey, secretKey, endpoint, region string, debug bool, tlsCert []byte) (*S3Agent, error) { + return newS3Agent(accessKey, secretKey, endpoint, region, debug, tlsCert, false) } -func NewTestOnlyS3Agent(accessKey, secretKey, endpoint string, debug bool) (*S3Agent, error) { - return newS3Agent(accessKey, secretKey, endpoint, debug, nil, true) +func NewInsecureS3Agent(accessKey, secretKey, endpoint, region string, debug bool) (*S3Agent, error) { + return newS3Agent(accessKey, secretKey, endpoint, region, debug, nil, true) } -func newS3Agent(accessKey, secretKey, endpoint string, debug bool, tlsCert []byte, insecure bool) (*S3Agent, error) { - const cephRegion = "us-east-1" +func newS3Agent(accessKey, secretKey, endpoint, region string, debug bool, tlsCert []byte, insecure bool) (*S3Agent, error) { + var cephRegion = "us-east-1" + if region != "" { + cephRegion = region + } logLevel := aws.LogOff if debug { @@ -57,14 +60,7 @@ func newS3Agent(accessKey, secretKey, endpoint string, debug bool, tlsCert []byt tlsEnabled := false if len(tlsCert) > 0 || insecure { tlsEnabled = true - if len(tlsCert) > 0 { - client.Transport = BuildTransportTLS(tlsCert) - } else if insecure { - client.Transport = &http.Transport{ - // #nosec G402 is enabled only for testing - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - } - } + client.Transport = BuildTransportTLS(tlsCert, insecure) } sess, err := session.NewSession( aws.NewConfig(). @@ -202,11 +198,16 @@ func (s *S3Agent) DeleteObjectInBucket(bucketname string, key string) (bool, err return true, nil } -func BuildTransportTLS(tlsCert []byte) *http.Transport { - caCertPool := x509.NewCertPool() - caCertPool.AppendCertsFromPEM(tlsCert) +func BuildTransportTLS(tlsCert []byte, insecure bool) *http.Transport { + // #nosec G402 is enabled only for testing + tlsConfig := &tls.Config{MinVersion: tls.VersionTLS12, InsecureSkipVerify: insecure} + if len(tlsCert) > 0 { + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(tlsCert) + tlsConfig.RootCAs = caCertPool + } return &http.Transport{ - TLSClientConfig: &tls.Config{RootCAs: caCertPool, MinVersion: tls.VersionTLS12}, + TLSClientConfig: tlsConfig, } } diff --git a/pkg/operator/ceph/object/s3-handlers_test.go b/pkg/operator/ceph/object/s3-handlers_test.go new file mode 100644 index 000000000000..0417d7eb2549 --- /dev/null +++ b/pkg/operator/ceph/object/s3-handlers_test.go @@ -0,0 +1,80 @@ +/* +Copyright 2021 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "net/http" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/stretchr/testify/assert" +) + +func TestNewS3Agent(t *testing.T) { + accessKey := "accessKey" + secretKey := "secretKey" + endpoint := "endpoint" + region := "region" + + t.Run("test without tls/debug", func(t *testing.T) { + debug := false + insecure := false + s3Agent, err := newS3Agent(accessKey, secretKey, endpoint, region, debug, nil, insecure) + assert.NoError(t, err) + assert.NotEqual(t, aws.LogDebug, s3Agent.Client.Config.LogLevel) + assert.Equal(t, nil, s3Agent.Client.Config.HTTPClient.Transport) + assert.True(t, *s3Agent.Client.Config.DisableSSL) + }) + t.Run("test with debug without tls", func(t *testing.T) { + debug := true + logLevel := aws.LogDebug + insecure := false + s3Agent, err := newS3Agent(accessKey, secretKey, endpoint, region, debug, nil, insecure) + assert.NoError(t, err) + assert.Equal(t, &logLevel, s3Agent.Client.Config.LogLevel) + assert.Nil(t, s3Agent.Client.Config.HTTPClient.Transport) + assert.True(t, *s3Agent.Client.Config.DisableSSL) + }) + t.Run("test without tls client cert but insecure tls", func(t *testing.T) { + debug := true + insecure := true + s3Agent, err := newS3Agent(accessKey, secretKey, endpoint, region, debug, nil, insecure) + assert.NoError(t, err) + assert.Nil(t, s3Agent.Client.Config.HTTPClient.Transport.(*http.Transport).TLSClientConfig.RootCAs) + assert.True(t, s3Agent.Client.Config.HTTPClient.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify) + assert.False(t, *s3Agent.Client.Config.DisableSSL) + }) + t.Run("test with secure tls client cert", func(t *testing.T) { + debug := true + insecure := false + tlsCert := []byte("tlsCert") + s3Agent, err := newS3Agent(accessKey, secretKey, endpoint, region, debug, tlsCert, insecure) + assert.NoError(t, err) + assert.NotNil(t, s3Agent.Client.Config.HTTPClient.Transport.(*http.Transport).TLSClientConfig.RootCAs) + assert.False(t, *s3Agent.Client.Config.DisableSSL) + }) + t.Run("test with insesure tls client cert", func(t *testing.T) { + debug := true + insecure := true + tlsCert := []byte("tlsCert") + s3Agent, err := newS3Agent(accessKey, secretKey, endpoint, region, debug, tlsCert, insecure) + assert.NoError(t, err) + assert.NotNil(t, s3Agent.Client.Config.HTTPClient.Transport) + assert.True(t, s3Agent.Client.Config.HTTPClient.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify) + assert.False(t, *s3Agent.Client.Config.DisableSSL) + }) +} diff --git a/pkg/operator/ceph/object/spec.go b/pkg/operator/ceph/object/spec.go index f1f1ffa04776..22f54b8bdc4d 100644 --- a/pkg/operator/ceph/object/spec.go +++ b/pkg/operator/ceph/object/spec.go @@ -38,7 +38,7 @@ import ( ) const ( - livenessProbePath = "/swift/healthcheck" + readinessProbePath = "/swift/healthcheck" // #nosec G101 since this is not leaking any hardcoded details setupVaultTokenFile = ` set -e @@ -112,7 +112,7 @@ func (c *clusterConfig) makeRGWPodSpec(rgwConfig *rgwConfig) (v1.PodTemplateSpec if c.clusterSpec.LogCollector.Enabled { shareProcessNamespace := true podSpec.ShareProcessNamespace = &shareProcessNamespace - podSpec.Containers = append(podSpec.Containers, *controller.LogCollectorContainer(strings.TrimPrefix(generateCephXUser(fmt.Sprintf("ceph-client.%s", rgwConfig.ResourceName)), "client."), c.clusterInfo.Namespace, *c.clusterSpec)) + podSpec.Containers = append(podSpec.Containers, *controller.LogCollectorContainer(getDaemonName(rgwConfig), c.clusterInfo.Namespace, *c.clusterSpec)) } // Replace default 
unreachable node toleration @@ -131,6 +131,27 @@ func (c *clusterConfig) makeRGWPodSpec(rgwConfig *rgwConfig) (v1.PodTemplateSpec }} podSpec.Volumes = append(podSpec.Volumes, certVol) } + // Check custom caBundle provided + if c.store.Spec.Gateway.CaBundleRef != "" { + customCaBundleVolSrc, err := c.generateVolumeSourceWithCaBundleSecret() + if err != nil { + return v1.PodTemplateSpec{}, err + } + customCaBundleVol := v1.Volume{ + Name: caBundleVolumeName, + VolumeSource: v1.VolumeSource{ + Secret: customCaBundleVolSrc, + }} + podSpec.Volumes = append(podSpec.Volumes, customCaBundleVol) + updatedCaBundleVol := v1.Volume{ + Name: caBundleUpdatedVolumeName, + VolumeSource: v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, + }} + podSpec.Volumes = append(podSpec.Volumes, updatedCaBundleVol) + podSpec.InitContainers = append(podSpec.InitContainers, + c.createCaBundleUpdateInitContainer(rgwConfig)) + } kmsEnabled, err := c.CheckRGWKMS() if err != nil { return v1.PodTemplateSpec{}, err @@ -170,6 +191,26 @@ func (c *clusterConfig) makeRGWPodSpec(rgwConfig *rgwConfig) (v1.PodTemplateSpec return podTemplateSpec, nil } +func (c *clusterConfig) createCaBundleUpdateInitContainer(rgwConfig *rgwConfig) v1.Container { + caBundleMount := v1.VolumeMount{Name: caBundleVolumeName, MountPath: caBundleSourceCustomDir, ReadOnly: true} + volumeMounts := append(controller.DaemonVolumeMounts(c.DataPathMap, rgwConfig.ResourceName), caBundleMount) + updatedCaBundleDir := "/tmp/new-ca-bundle/" + updatedBundleMount := v1.VolumeMount{Name: caBundleUpdatedVolumeName, MountPath: updatedCaBundleDir, ReadOnly: false} + volumeMounts = append(volumeMounts, updatedBundleMount) + return v1.Container{ + Name: "update-ca-bundle-initcontainer", + Command: []string{"/bin/bash", "-c"}, + // copy all content of caBundleExtractedDir to avoid directory mount itself + Args: []string{ + fmt.Sprintf("/usr/bin/update-ca-trust extract; cp -rf %s/* %s", caBundleExtractedDir, updatedCaBundleDir), + }, + Image: c.clusterSpec.CephVersion.Image, + VolumeMounts: volumeMounts, + Resources: c.store.Spec.Gateway.Resources, + SecurityContext: controller.PodSecurityContext(), + } +} + // The vault token is passed as Secret for rgw container. So it is mounted as read only. // RGW has restrictions over vault token file, it should owned by same user(ceph) which // rgw daemon runs and all other permission should be nil or zero. 
Here ownership can be @@ -230,21 +271,28 @@ func (c *clusterConfig) makeDaemonContainer(rgwConfig *rgwConfig) v1.Container { ), Env: controller.DaemonEnvVars(c.clusterSpec.CephVersion.Image), Resources: c.store.Spec.Gateway.Resources, - LivenessProbe: c.generateLiveProbe(), + LivenessProbe: c.defaultLivenessProbe(), + ReadinessProbe: c.defaultReadinessProbe(), SecurityContext: controller.PodSecurityContext(), WorkingDir: cephconfig.VarLogCephDir, } // If the liveness probe is enabled configureLivenessProbe(&container, c.store.Spec.HealthCheck) + // If the readiness probe is enabled + configureReadinessProbe(&container, c.store.Spec.HealthCheck) if c.store.Spec.IsTLSEnabled() { // Add a volume mount for the ssl certificate mount := v1.VolumeMount{Name: certVolumeName, MountPath: certDir, ReadOnly: true} container.VolumeMounts = append(container.VolumeMounts, mount) } + if c.store.Spec.Gateway.CaBundleRef != "" { + updatedBundleMount := v1.VolumeMount{Name: caBundleUpdatedVolumeName, MountPath: caBundleExtractedDir, ReadOnly: true} + container.VolumeMounts = append(container.VolumeMounts, updatedBundleMount) + } kmsEnabled, err := c.CheckRGWKMS() if err != nil { - logger.Errorf("enabling KMS failed %v", err) + logger.Errorf("failed to enable KMS. %v", err) return v1.Container{} } if kmsEnabled { @@ -276,7 +324,7 @@ func configureLivenessProbe(container *v1.Container, healthCheck cephv1.BucketHe // If the spec value is empty, let's use a default if probe != nil { // Set the liveness probe on the container to overwrite the default probe created by Rook - container.LivenessProbe = cephconfig.GetLivenessProbeWithDefaults(probe, container.LivenessProbe) + container.LivenessProbe = cephconfig.GetProbeWithDefaults(probe, container.LivenessProbe) } } else { container.LivenessProbe = nil @@ -284,20 +332,47 @@ func configureLivenessProbe(container *v1.Container, healthCheck cephv1.BucketHe } } -func (c *clusterConfig) generateLiveProbe() *v1.Probe { +// configureReadinessProbe returns the desired readiness probe for a given daemon +func configureReadinessProbe(container *v1.Container, healthCheck cephv1.BucketHealthCheckSpec) { + if ok := healthCheck.ReadinessProbe; ok != nil { + if !healthCheck.ReadinessProbe.Disabled { + probe := healthCheck.ReadinessProbe.Probe + // If the spec value is empty, let's use a default + if probe != nil { + // Set the readiness probe on the container to overwrite the default probe created by Rook + container.ReadinessProbe = cephconfig.GetProbeWithDefaults(probe, container.ReadinessProbe) + } + } else { + container.ReadinessProbe = nil + } + } +} + +func (c *clusterConfig) defaultLivenessProbe() *v1.Probe { + return &v1.Probe{ + Handler: v1.Handler{ + TCPSocket: &v1.TCPSocketAction{ + Port: c.generateProbePort(), + }, + }, + InitialDelaySeconds: 10, + } +} + +func (c *clusterConfig) defaultReadinessProbe() *v1.Probe { return &v1.Probe{ Handler: v1.Handler{ HTTPGet: &v1.HTTPGetAction{ - Path: livenessProbePath, - Port: c.generateLiveProbePort(), - Scheme: c.generateLiveProbeScheme(), + Path: readinessProbePath, + Port: c.generateProbePort(), + Scheme: c.generateReadinessProbeScheme(), }, }, InitialDelaySeconds: 10, } } -func (c *clusterConfig) generateLiveProbeScheme() v1.URIScheme { +func (c *clusterConfig) generateReadinessProbeScheme() v1.URIScheme { // Default to HTTP uriScheme := v1.URISchemeHTTP @@ -310,7 +385,7 @@ func (c *clusterConfig) generateLiveProbeScheme() v1.URIScheme { return uriScheme } -func (c *clusterConfig) generateLiveProbePort() intstr.IntOrString { 
+func (c *clusterConfig) generateProbePort() intstr.IntOrString { // The port the liveness probe needs to probe // Assume we run on SDN by default port := intstr.FromInt(int(rgwPortInternalPort)) @@ -342,7 +417,7 @@ func (c *clusterConfig) generateService(cephObjectStore *cephv1.CephObjectStore) svc.Spec.ClusterIP = v1.ClusterIPNone } - destPort := c.generateLiveProbePort() + destPort := c.generateProbePort() // When the cluster is external we must use the same one as the gateways are listening on if cephObjectStore.Spec.IsExternal() { @@ -439,18 +514,24 @@ func (c *clusterConfig) vaultPrefixRGW() string { func (c *clusterConfig) CheckRGWKMS() (bool, error) { if c.store.Spec.Security != nil && c.store.Spec.Security.KeyManagementService.IsEnabled() { - err := kms.ValidateConnectionDetails(c.context, *c.store.Spec.Security, c.store.Namespace) + err := kms.ValidateConnectionDetails(c.context, c.store.Spec.Security, c.store.Namespace) if err != nil { return false, err } secretEngine := c.store.Spec.Security.KeyManagementService.ConnectionDetails[kms.VaultSecretEngineKey] - kvVers := c.store.Spec.Security.KeyManagementService.ConnectionDetails[vault.VaultBackendKey] // currently RGW supports kv(version 2) and transit secret engines in vault switch secretEngine { case kms.VaultKVSecretEngineKey: - if kvVers != "v2" { - return false, errors.New("failed to validate vault kv version, only v2 is supported") + kvVers := c.store.Spec.Security.KeyManagementService.ConnectionDetails[vault.VaultBackendKey] + if kvVers != "" { + if kvVers != "v2" { + return false, errors.New("failed to validate vault kv version, only v2 is supported") + } + } else { + // If VAUL_BACKEND is not specified let's assume it's v2 + logger.Warningf("%s is not set, assuming the only supported version 2", vault.VaultBackendKey) + c.store.Spec.Security.KeyManagementService.ConnectionDetails[vault.VaultBackendKey] = "v2" } return true, nil case kms.VaultTransitSecretEngineKey: @@ -460,6 +541,7 @@ func (c *clusterConfig) CheckRGWKMS() (bool, error) { } } + return false, nil } @@ -504,7 +586,7 @@ func (c *clusterConfig) generateVolumeSourceWithTLSSecret() (*v1.SecretVolumeSou secretVolSrc = &v1.SecretVolumeSource{ SecretName: c.store.Spec.Gateway.SSLCertificateRef, } - secretType, err := c.rgwTLSSecretType() + secretType, err := c.rgwTLSSecretType(c.store.Spec.Gateway.SSLCertificateRef) if err != nil { return nil, err } @@ -533,10 +615,34 @@ func (c *clusterConfig) generateVolumeSourceWithTLSSecret() (*v1.SecretVolumeSou return secretVolSrc, nil } -func (c *clusterConfig) rgwTLSSecretType() (v1.SecretType, error) { - rgwTlsSecret, err := c.context.Clientset.CoreV1().Secrets(c.clusterInfo.Namespace).Get(context.TODO(), c.store.Spec.Gateway.SSLCertificateRef, metav1.GetOptions{}) +func (c *clusterConfig) generateVolumeSourceWithCaBundleSecret() (*v1.SecretVolumeSource, error) { + // Keep the ca-bundle as secure as possible in the container. Give only user read perms. + // Same as above for generateVolumeSourceWithTLSSecret function. 
+ userReadOnly := int32(0400) + caBundleVolSrc := &v1.SecretVolumeSource{ + SecretName: c.store.Spec.Gateway.CaBundleRef, + } + secretType, err := c.rgwTLSSecretType(c.store.Spec.Gateway.CaBundleRef) + if err != nil { + return nil, err + } + if secretType != v1.SecretTypeOpaque { + return nil, errors.New("CaBundle secret should be 'Opaque' type") + } + caBundleVolSrc.Items = []v1.KeyToPath{ + {Key: caBundleKeyName, Path: caBundleFileName, Mode: &userReadOnly}, + } + return caBundleVolSrc, nil +} + +func (c *clusterConfig) rgwTLSSecretType(secretName string) (v1.SecretType, error) { + rgwTlsSecret, err := c.context.Clientset.CoreV1().Secrets(c.clusterInfo.Namespace).Get(context.TODO(), secretName, metav1.GetOptions{}) if rgwTlsSecret != nil { return rgwTlsSecret.Type, nil } return "", errors.Wrapf(err, "no Kubernetes secrets referring TLS certificates found") } + +func getDaemonName(rgwConfig *rgwConfig) string { + return fmt.Sprintf("ceph-%s", generateCephXUser(rgwConfig.ResourceName)) +} diff --git a/pkg/operator/ceph/object/spec_test.go b/pkg/operator/ceph/object/spec_test.go index 479186937860..2309677dce37 100644 --- a/pkg/operator/ceph/object/spec_test.go +++ b/pkg/operator/ceph/object/spec_test.go @@ -36,6 +36,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" ) func TestPodSpecs(t *testing.T) { @@ -189,6 +190,24 @@ func TestSSLPodSpec(t *testing.T) { secretVolSrc, err = c.generateVolumeSourceWithTLSSecret() assert.NoError(t, err) assert.Equal(t, secretVolSrc.SecretName, "rgw-cert") + // Using caBundleRef + // Opaque Secret + c.store.Spec.Gateway.CaBundleRef = "mycabundle" + cabundlesecret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: c.store.Spec.Gateway.CaBundleRef, + Namespace: c.store.Namespace, + }, + Data: map[string][]byte{ + "cabundle": []byte("cabundletesting"), + }, + Type: v1.SecretTypeOpaque, + } + _, err = c.context.Clientset.CoreV1().Secrets(store.Namespace).Create(ctx, cabundlesecret, metav1.CreateOptions{}) + assert.NoError(t, err) + caBundleVolSrc, err := c.generateVolumeSourceWithCaBundleSecret() + assert.NoError(t, err) + assert.Equal(t, caBundleVolSrc.SecretName, "mycabundle") s, err = c.makeRGWPodSpec(rgwConfig) assert.NoError(t, err) podTemplate = cephtest.NewPodTemplateSpecTester(t, &s) @@ -265,7 +284,7 @@ func TestValidateSpec(t *testing.T) { assert.Nil(t, err) } -func TestGenerateLiveProbe(t *testing.T) { +func TestDefaultLivenessProbe(t *testing.T) { store := simpleStore() c := &clusterConfig{ store: store, @@ -276,33 +295,93 @@ func TestGenerateLiveProbe(t *testing.T) { }, } + desiredProbe := &v1.Probe{ + Handler: v1.Handler{ + TCPSocket: &v1.TCPSocketAction{ + Port: intstr.FromInt(8080), + }, + }, + InitialDelaySeconds: 10, + } // No SSL - HostNetwork is disabled - using internal port - p := c.generateLiveProbe() - assert.Equal(t, int32(8080), p.Handler.HTTPGet.Port.IntVal) - assert.Equal(t, v1.URISchemeHTTP, p.Handler.HTTPGet.Scheme) + p := c.defaultLivenessProbe() + assert.Equal(t, desiredProbe, p) // No SSL - HostNetwork is enabled c.store.Spec.Gateway.Port = 123 c.store.Spec.Gateway.SecurePort = 0 c.clusterSpec.Network.HostNetwork = true - p = c.generateLiveProbe() - assert.Equal(t, int32(123), p.Handler.HTTPGet.Port.IntVal) + p = c.defaultLivenessProbe() + desiredProbe.Handler.TCPSocket.Port = intstr.FromInt(123) + assert.Equal(t, desiredProbe, p) // SSL - HostNetwork is enabled c.store.Spec.Gateway.Port = 0 
c.store.Spec.Gateway.SecurePort = 321 c.store.Spec.Gateway.SSLCertificateRef = "foo" - p = c.generateLiveProbe() - assert.Equal(t, int32(321), p.Handler.HTTPGet.Port.IntVal) + p = c.defaultLivenessProbe() + desiredProbe.Handler.TCPSocket.Port = intstr.FromInt(321) + assert.Equal(t, desiredProbe, p) // Both Non-SSL and SSL are enabled - // liveprobe just on Non-SSL + // livenessProbe just on Non-SSL + c.store.Spec.Gateway.Port = 123 + c.store.Spec.Gateway.SecurePort = 321 + p = c.defaultLivenessProbe() + desiredProbe.Handler.TCPSocket.Port = intstr.FromInt(123) + assert.Equal(t, desiredProbe, p) +} + +func TestDefaultReadinessProbe(t *testing.T) { + store := simpleStore() + c := &clusterConfig{ + store: store, + clusterSpec: &cephv1.ClusterSpec{ + Network: cephv1.NetworkSpec{ + HostNetwork: false, + }, + }, + } + + desiredProbe := &v1.Probe{ + Handler: v1.Handler{ + HTTPGet: &v1.HTTPGetAction{ + Path: readinessProbePath, + Port: intstr.FromInt(8080), + Scheme: v1.URISchemeHTTP, + }, + }, + InitialDelaySeconds: 10, + } + // No SSL - HostNetwork is disabled - using internal port + p := c.defaultReadinessProbe() + assert.Equal(t, desiredProbe, p) + + // No SSL - HostNetwork is enabled c.store.Spec.Gateway.Port = 123 + c.store.Spec.Gateway.SecurePort = 0 + c.clusterSpec.Network.HostNetwork = true + p = c.defaultReadinessProbe() + desiredProbe.Handler.HTTPGet.Port = intstr.FromInt(123) + assert.Equal(t, desiredProbe, p) + + // SSL - HostNetwork is enabled + c.store.Spec.Gateway.Port = 0 c.store.Spec.Gateway.SecurePort = 321 c.store.Spec.Gateway.SSLCertificateRef = "foo" - p = c.generateLiveProbe() - assert.Equal(t, v1.URISchemeHTTP, p.Handler.HTTPGet.Scheme) - assert.Equal(t, int32(123), p.Handler.HTTPGet.Port.IntVal) + p = c.defaultReadinessProbe() + desiredProbe.Handler.HTTPGet.Port = intstr.FromInt(321) + desiredProbe.Handler.HTTPGet.Scheme = v1.URISchemeHTTPS + assert.Equal(t, desiredProbe, p) + + // Both Non-SSL and SSL are enabled + // readinessProbe just on Non-SSL + c.store.Spec.Gateway.Port = 123 + c.store.Spec.Gateway.SecurePort = 321 + p = c.defaultReadinessProbe() + desiredProbe.Handler.HTTPGet.Port = intstr.FromInt(123) + desiredProbe.Handler.HTTPGet.Scheme = v1.URISchemeHTTP + assert.Equal(t, desiredProbe, p) } func TestCheckRGWKMS(t *testing.T) { @@ -344,6 +423,7 @@ func TestCheckRGWKMS(t *testing.T) { // kv engine version v1, will fail c.store.Spec.Security.KeyManagementService.ConnectionDetails["VAULT_SECRET_ENGINE"] = "kv" + c.store.Spec.Security.KeyManagementService.ConnectionDetails["VAULT_BACKEND"] = "v1" b, err = c.CheckRGWKMS() assert.False(t, b) assert.Error(t, err) @@ -361,3 +441,33 @@ func TestCheckRGWKMS(t *testing.T) { assert.True(t, b) assert.NoError(t, err) } + +func TestGetDaemonName(t *testing.T) { + context := &clusterd.Context{Clientset: test.New(t, 3)} + store := simpleStore() + tests := []struct { + storeName string + testDaemonName string + daemonID string + }{ + {"default", "ceph-client.rgw.default.a", "a"}, + {"my-store", "ceph-client.rgw.my.store.b", "b"}, + } + for _, tt := range tests { + t.Run(tt.storeName, func(t *testing.T) { + c := &clusterConfig{ + context: context, + store: store, + } + c.store.Name = tt.storeName + daemonName := fmt.Sprintf("%s-%s", c.store.Name, tt.daemonID) + resourceName := fmt.Sprintf("%s-%s", AppName, daemonName) + rgwconfig := &rgwConfig{ + ResourceName: resourceName, + DaemonID: daemonName, + } + daemon := getDaemonName(rgwconfig) + assert.Equal(t, tt.testDaemonName, daemon) + }) + } +} diff --git 
a/pkg/operator/ceph/object/status.go b/pkg/operator/ceph/object/status.go index a369a981b1ef..ddf4c5ad4287 100644 --- a/pkg/operator/ceph/object/status.go +++ b/pkg/operator/ceph/object/status.go @@ -89,12 +89,12 @@ func updateStatusBucket(client client.Client, name types.NamespacedName, status } objectStore.Status.BucketStatus = toCustomResourceStatus(objectStore.Status.BucketStatus, details, status) + // do not transition to other statuses once deletion begins if objectStore.Status.Phase != cephv1.ConditionDeleting { - // do not transition to to other statuses once deletion begins - logger.Debugf("object store %q status not updated to %q because it is deleting", name.String(), status) objectStore.Status.Phase = status } + // but we still need to update the health checker status if err := reporting.UpdateStatus(client, objectStore); err != nil { return errors.Wrapf(err, "failed to set object store %q status to %v", name.String(), status) } diff --git a/pkg/operator/ceph/object/user.go b/pkg/operator/ceph/object/user.go index a5a7ad4e2f87..ce9c18dd38db 100644 --- a/pkg/operator/ceph/object/user.go +++ b/pkg/operator/ceph/object/user.go @@ -37,13 +37,16 @@ const ( // An ObjectUser defines the details of an object store user. type ObjectUser struct { - UserID string `json:"userId"` - DisplayName *string `json:"displayName"` - Email *string `json:"email"` - AccessKey *string `json:"accessKey"` - SecretKey *string `json:"secretKey"` - SystemUser bool `json:"systemuser"` - AdminOpsUser bool `json:"adminopsuser"` + UserID string `json:"userId"` + DisplayName *string `json:"displayName"` + Email *string `json:"email"` + AccessKey *string `json:"accessKey"` + SecretKey *string `json:"secretKey"` + SystemUser bool `json:"systemuser"` + AdminOpsUser bool `json:"adminopsuser"` + MaxBuckets int `json:"max_buckets"` + UserQuota admin.QuotaSpec `json:"user_quota"` + Caps []admin.UserCapSpec `json:"caps"` } // func decodeUser(data string) (*ObjectUser, int, error) { @@ -56,6 +59,18 @@ func decodeUser(data string) (*ObjectUser, int, error) { rookUser := ObjectUser{UserID: user.ID, DisplayName: &user.DisplayName, Email: &user.Email} + if len(user.Caps) > 0 { + rookUser.Caps = user.Caps + } + + if user.MaxBuckets != nil { + rookUser.MaxBuckets = *user.MaxBuckets + } + + if user.UserQuota.Enabled != nil { + rookUser.UserQuota = user.UserQuota + } + if len(user.Keys) > 0 { rookUser.AccessKey = &user.Keys[0].AccessKey rookUser.SecretKey = &user.Keys[0].SecretKey @@ -121,6 +136,10 @@ func CreateUser(c *Context, user ObjectUser) (*ObjectUser, int, error) { result, err := runAdminCommand(c, true, args...) 
if err != nil { + if code, err := exec.ExtractExitCode(err); err == nil && code == int(syscall.EEXIST) { + return nil, ErrorCodeFileExists, errors.New("s3 user already exists") + } + if strings.Contains(result, "could not create user: unable to create user, user: ") { return nil, ErrorCodeFileExists, errors.New("s3 user already exists") } diff --git a/pkg/operator/ceph/object/user/controller.go b/pkg/operator/ceph/object/user/controller.go index 66f6132420ba..8a9e5cb7fdd3 100644 --- a/pkg/operator/ceph/object/user/controller.go +++ b/pkg/operator/ceph/object/user/controller.go @@ -163,7 +163,7 @@ func (r *ReconcileObjectStoreUser) reconcile(request reconcile.Request) (reconci } // Make sure a CephCluster is present otherwise do nothing - cephCluster, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName) + cephCluster, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, request.NamespacedName, controllerName) if !isReadyToReconcile { // This handles the case where the Ceph Cluster is gone and we want to delete that CR // We skip the deleteUser() function since everything is gone already @@ -287,13 +287,44 @@ func (r *ReconcileObjectStoreUser) createorUpdateCephUser(u *cephv1.CephObjectSt } else { return errors.Wrapf(err, "failed to get details from ceph object user %q", u.Name) } + } else if *user.MaxBuckets != *r.userConfig.MaxBuckets { + // TODO: handle update for user capabilities, depends on https://github.com/ceph/go-ceph/pull/571 + user, err = r.objContext.AdminOpsClient.ModifyUser(context.TODO(), *r.userConfig) + if err != nil { + return errors.Wrapf(err, "failed to create ceph object user %v", &r.userConfig.ID) + } + logCreateOrUpdate = fmt.Sprintf("updated ceph object user %q", u.Name) + } + + var quotaEnabled = false + var maxSize int64 = -1 + var maxObjects int64 = -1 + if u.Spec.Quotas != nil { + if u.Spec.Quotas.MaxObjects != nil { + maxObjects = *u.Spec.Quotas.MaxObjects + quotaEnabled = true + } + if u.Spec.Quotas.MaxSize != nil { + maxSize = u.Spec.Quotas.MaxSize.Value() + quotaEnabled = true + } + } + userQuota := admin.QuotaSpec{ + UID: u.Name, + Enabled: &quotaEnabled, + MaxSize: &maxSize, + MaxObjects: &maxObjects, + } + err = r.objContext.AdminOpsClient.SetUserQuota(context.TODO(), userQuota) + if err != nil { + return errors.Wrapf(err, "failed to set quotas for user %q", u.Name) } // Set access and secret key r.userConfig.Keys[0].AccessKey = user.Keys[0].AccessKey r.userConfig.Keys[0].SecretKey = user.Keys[0].SecretKey - logger.Info(logCreateOrUpdate) + return nil } @@ -340,6 +371,30 @@ func generateUserConfig(user *cephv1.CephObjectStoreUser) admin.User { Keys: make([]admin.UserKeySpec, 1), } + defaultMaxBuckets := 1000 + userConfig.MaxBuckets = &defaultMaxBuckets + if user.Spec.Quotas != nil && user.Spec.Quotas.MaxBuckets != nil { + userConfig.MaxBuckets = user.Spec.Quotas.MaxBuckets + } + + if user.Spec.Capabilities != nil { + if user.Spec.Capabilities.User != "" { + userConfig.UserCaps += fmt.Sprintf("users=%s;", user.Spec.Capabilities.User) + } + if user.Spec.Capabilities.Bucket != "" { + userConfig.UserCaps += fmt.Sprintf("buckets=%s;", user.Spec.Capabilities.Bucket) + } + if user.Spec.Capabilities.MetaData != "" { + userConfig.UserCaps += fmt.Sprintf("metadata=%s;", user.Spec.Capabilities.MetaData) + } + if user.Spec.Capabilities.Usage != "" { + userConfig.UserCaps += fmt.Sprintf("usage=%s;",
user.Spec.Capabilities.Usage) + } + if user.Spec.Capabilities.Zone != "" { + userConfig.UserCaps += fmt.Sprintf("zone=%s;", user.Spec.Capabilities.Zone) + } + } + + return userConfig } diff --git a/pkg/operator/ceph/object/user/controller_test.go b/pkg/operator/ceph/object/user/controller_test.go index b2fec0a04062..ad4550e50d8c 100644 --- a/pkg/operator/ceph/object/user/controller_test.go +++ b/pkg/operator/ceph/object/user/controller_test.go @@ -38,6 +38,7 @@ import ( exectest "github.com/rook/rook/pkg/util/exec/test" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -88,9 +89,12 @@ const ( ) var ( - name = "my-user" - namespace = "rook-ceph" - store = "my-store" + name = "my-user" + namespace = "rook-ceph" + store = "my-store" + maxbucket = 200 + maxsizestr = "10G" + maxobject int64 = 10000 ) func TestCephObjectStoreUserController(t *testing.T) { @@ -306,7 +310,8 @@ func TestCephObjectStoreUserController(t *testing.T) { newMultisiteAdminOpsCtxFunc = func(objContext *cephobject.Context, spec *cephv1.ObjectStoreSpec) (*cephobject.AdminOpsContext, error) { mockClient := &cephobject.MockClient{ MockDo: func(req *http.Request) (*http.Response, error) { - if req.URL.RawQuery == "display-name=my-user&format=json&uid=my-user" && req.Method == http.MethodGet && req.URL.Path == "rook-ceph-rgw-my-store.mycluster.svc/admin/user" { + if (req.URL.RawQuery == "display-name=my-user&format=json&max-buckets=1000&uid=my-user" && (req.Method == http.MethodGet || req.Method == http.MethodPost) && req.URL.Path == "rook-ceph-rgw-my-store.mycluster.svc/admin/user") || + (req.URL.RawQuery == "enabled=false&format=json&max-objects=-1&max-size=-1&quota=&quota-type=user&uid=my-user" && req.Method == http.MethodPut && req.URL.Path == "rook-ceph-rgw-my-store.mycluster.svc/admin/user") { return &http.Response{ StatusCode: 200, Body: ioutil.NopCloser(bytes.NewReader([]byte(userCreateJSON))), @@ -353,3 +358,148 @@ func TestBuildUpdateStatusInfo(t *testing.T) { assert.NotEmpty(t, statusInfo["secretName"]) assert.Equal(t, "rook-ceph-object-user-my-store-my-user", statusInfo["secretName"]) } + +func TestCreateorUpdateCephUser(t *testing.T) { + // Set DEBUG logging + capnslog.SetGlobalLogLevel(capnslog.DEBUG) + + objectUser := &cephv1.CephObjectStoreUser{ + ObjectMeta: metav1.ObjectMeta{ + Name: "", + Namespace: namespace, + }, + Spec: cephv1.ObjectStoreUserSpec{ + Store: store, + }, + TypeMeta: metav1.TypeMeta{ + Kind: "CephObjectStoreUser", + }, + } + mockClient := &cephobject.MockClient{ + MockDo: func(req *http.Request) (*http.Response, error) { + if req.URL.Path != "rook-ceph-rgw-my-store.mycluster.svc/admin/user" { + return nil, fmt.Errorf("unexpected url path %q", req.URL.Path) + } + + if req.Method == http.MethodGet || req.Method == http.MethodPost { + if req.URL.RawQuery == "display-name=my-user&format=json&max-buckets=1000&uid=my-user" || + req.URL.RawQuery == "display-name=my-user&format=json&max-buckets=200&uid=my-user" || + req.URL.RawQuery == "display-name=my-user&format=json&max-buckets=1000&uid=my-user&user-caps=users%3Dread%3Bbuckets%3Dread%3B" || + req.URL.RawQuery == "display-name=my-user&format=json&max-buckets=200&uid=my-user&user-caps=users%3Dread%3Bbuckets%3Dread%3B" { + return &http.Response{ + StatusCode: 200, + Body: ioutil.NopCloser(bytes.NewReader([]byte(userCreateJSON))), + }, nil + } + } + + if req.Method == http.MethodPut { + if
req.URL.RawQuery == "enabled=false&format=json&max-objects=-1&max-size=-1"a="a-type=user&uid=my-user" || + req.URL.RawQuery == "enabled=true&format=json&max-objects=10000&max-size=-1"a="a-type=user&uid=my-user" || + req.URL.RawQuery == "enabled=true&format=json&max-objects=-1&max-size=10000000000"a="a-type=user&uid=my-user" || + req.URL.RawQuery == "enabled=true&format=json&max-objects=10000&max-size=10000000000"a="a-type=user&uid=my-user" { + return &http.Response{ + StatusCode: 200, + Body: ioutil.NopCloser(bytes.NewReader([]byte(userCreateJSON))), + }, nil + } + } + + return nil, fmt.Errorf("unexpected request: %q. method %q. path %q", req.URL.RawQuery, req.Method, req.URL.Path) + }, + } + adminClient, err := admin.New("rook-ceph-rgw-my-store.mycluster.svc", "53S6B9S809NUP19IJ2K3", "1bXPegzsGClvoGAiJdHQD1uOW2sQBLAZM9j9VtXR", mockClient) + assert.NoError(t, err) + userConfig := generateUserConfig(objectUser) + r := &ReconcileObjectStoreUser{ + objContext: &cephobject.AdminOpsContext{ + AdminOpsClient: adminClient, + }, + userConfig: &userConfig, + } + maxsize, err := resource.ParseQuantity(maxsizestr) + assert.NoError(t, err) + + t.Run("user with empty name", func(t *testing.T) { + err = r.createorUpdateCephUser(objectUser) + assert.Error(t, err) + }) + + t.Run("user without any Quotas or Capabilities", func(t *testing.T) { + objectUser.Name = name + userConfig = generateUserConfig(objectUser) + r.userConfig = &userConfig + err = r.createorUpdateCephUser(objectUser) + assert.NoError(t, err) + }) + + t.Run("setting MaxBuckets for the user", func(t *testing.T) { + objectUser.Spec.Quotas = &cephv1.ObjectUserQuotaSpec{MaxBuckets: &maxbucket} + userConfig = generateUserConfig(objectUser) + r.userConfig = &userConfig + err = r.createorUpdateCephUser(objectUser) + assert.NoError(t, err) + }) + + t.Run("setting Capabilities for the user", func(t *testing.T) { + objectUser.Spec.Quotas = nil + objectUser.Spec.Capabilities = &cephv1.ObjectUserCapSpec{ + User: "read", + Bucket: "read", + } + userConfig = generateUserConfig(objectUser) + r.userConfig = &userConfig + err = r.createorUpdateCephUser(objectUser) + assert.NoError(t, err) + }) + + // Testing UserQuotaSpec : MaxObjects and MaxSize + t.Run("setting MaxObjects for the user", func(t *testing.T) { + objectUser.Spec.Capabilities = nil + objectUser.Spec.Quotas = &cephv1.ObjectUserQuotaSpec{MaxObjects: &maxobject} + userConfig = generateUserConfig(objectUser) + r.userConfig = &userConfig + err = r.createorUpdateCephUser(objectUser) + assert.NoError(t, err) + }) + t.Run("setting MaxSize for the user", func(t *testing.T) { + objectUser.Spec.Quotas = &cephv1.ObjectUserQuotaSpec{MaxSize: &maxsize} + userConfig = generateUserConfig(objectUser) + r.userConfig = &userConfig + err = r.createorUpdateCephUser(objectUser) + assert.NoError(t, err) + }) + t.Run("resetting MaxSize and MaxObjects for the user", func(t *testing.T) { + objectUser.Spec.Quotas = nil + userConfig = generateUserConfig(objectUser) + r.userConfig = &userConfig + err = r.createorUpdateCephUser(objectUser) + assert.NoError(t, err) + }) + t.Run("setting both MaxSize and MaxObjects for the user", func(t *testing.T) { + objectUser.Spec.Quotas = &cephv1.ObjectUserQuotaSpec{MaxObjects: &maxobject, MaxSize: &maxsize} + userConfig = generateUserConfig(objectUser) + r.userConfig = &userConfig + err = r.createorUpdateCephUser(objectUser) + assert.NoError(t, err) + }) + t.Run("resetting MaxSize and MaxObjects again for the user", func(t *testing.T) { + objectUser.Spec.Quotas = nil + userConfig = 
generateUserConfig(objectUser) + r.userConfig = &userConfig + err = r.createorUpdateCephUser(objectUser) + assert.NoError(t, err) + }) + + t.Run("setting both Quotas and Capabilities for the user", func(t *testing.T) { + objectUser.Spec.Capabilities = &cephv1.ObjectUserCapSpec{ + User: "read", + Bucket: "read", + } + objectUser.Spec.Quotas = &cephv1.ObjectUserQuotaSpec{MaxBuckets: &maxbucket, MaxObjects: &maxobject, MaxSize: &maxsize} + userConfig = generateUserConfig(objectUser) + r.userConfig = &userConfig + err = r.createorUpdateCephUser(objectUser) + assert.NoError(t, err) + }) +} diff --git a/pkg/operator/ceph/object/zone/controller.go b/pkg/operator/ceph/object/zone/controller.go index 975a1926227e..6daccdcd75d9 100644 --- a/pkg/operator/ceph/object/zone/controller.go +++ b/pkg/operator/ceph/object/zone/controller.go @@ -144,7 +144,7 @@ func (r *ReconcileObjectZone) reconcile(request reconcile.Request) (reconcile.Re } // Make sure a CephCluster is present otherwise do nothing - cephCluster, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName) + cephCluster, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, request.NamespacedName, controllerName) if !isReadyToReconcile { // This handles the case where the Ceph Cluster is gone and we want to delete that CR // diff --git a/pkg/operator/ceph/object/zonegroup/controller.go b/pkg/operator/ceph/object/zonegroup/controller.go index 34c5a3f900fd..98c4bf983af3 100644 --- a/pkg/operator/ceph/object/zonegroup/controller.go +++ b/pkg/operator/ceph/object/zonegroup/controller.go @@ -142,7 +142,7 @@ func (r *ReconcileObjectZoneGroup) reconcile(request reconcile.Request) (reconci } // Make sure a CephCluster is present otherwise do nothing - _, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName) + _, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, request.NamespacedName, controllerName) if !isReadyToReconcile { // This handles the case where the Ceph Cluster is gone and we want to delete that CR if !cephObjectZoneGroup.GetDeletionTimestamp().IsZero() && !cephClusterExists { diff --git a/pkg/operator/ceph/operator.go b/pkg/operator/ceph/operator.go index 85be89ebea66..6569a637c54f 100644 --- a/pkg/operator/ceph/operator.go +++ b/pkg/operator/ceph/operator.go @@ -33,12 +33,11 @@ import ( "github.com/rook/rook/pkg/operator/ceph/cluster" opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" "github.com/rook/rook/pkg/operator/ceph/csi" + "github.com/rook/rook/pkg/operator/ceph/csi/peermap" "github.com/rook/rook/pkg/operator/ceph/provisioner" "github.com/rook/rook/pkg/operator/discover" "github.com/rook/rook/pkg/operator/k8sutil" v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/sig-storage-lib-external-provisioner/v6/controller" ) @@ -60,6 +59,10 @@ var ( // ImmediateRetryResult Return this for a immediate retry of the reconciliation loop with the same request object. 
ImmediateRetryResult = reconcile.Result{Requeue: true} + + // Signals to watch for to terminate the operator gracefully + // Using os.Interrupt is more portable across platforms instead of os.SIGINT + shutdownSignals = []os.Signal{os.Interrupt, syscall.SIGTERM} ) // Operator type for managing storage @@ -126,8 +129,10 @@ func (o *Operator) Run() error { return errors.Errorf("rook operator namespace is not provided. expose it via downward API in the rook operator manifest file using environment variable %q", k8sutil.PodNamespaceEnvVar) } - // creating a context - stopContext, stopFunc := context.WithCancel(context.Background()) + opcontroller.SetCephCommandsTimeout(o.context) + + // Initialize signal handler and context + stopContext, stopFunc := signal.NotifyContext(context.Background(), shutdownSignals...) defer stopFunc() rookDiscover := discover.New(o.context.Clientset) @@ -151,10 +156,8 @@ func (o *Operator) Run() error { return errors.Wrap(err, "failed to get server version") } - // Initialize signal handler - signalChan := make(chan os.Signal, 1) + // Initialize stop channel for watchers stopChan := make(chan struct{}) - signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM) // For Flex Driver, run volume provisioner for each of the supported configurations if opcontroller.FlexDriverEnabled(o.context) { @@ -190,8 +193,8 @@ func (o *Operator) Run() error { // Signal handler to stop the operator for { select { - case <-signalChan: - logger.Info("shutdown signal received, exiting...") + case <-stopContext.Done(): + logger.Infof("shutdown signal received, exiting... %v", stopContext.Err()) o.cleanup(stopChan) return nil case err := <-mgrErrorChan: @@ -248,7 +251,8 @@ func (o *Operator) updateDrivers() error { return nil } - ownerRef, err := getDeploymentOwnerReference(o.context.Clientset, o.operatorNamespace) + operatorPodName := os.Getenv(k8sutil.PodNameEnvVar) + ownerRef, err := k8sutil.GetDeploymentOwnerReference(o.context.Clientset, operatorPodName, o.operatorNamespace) if err != nil { logger.Warningf("could not find deployment owner reference to assign to csi drivers. 
%v", err) } @@ -265,35 +269,11 @@ func (o *Operator) updateDrivers() error { return errors.Wrap(err, "failed creating csi config map") } - go csi.ValidateAndConfigureDrivers(o.context, o.operatorNamespace, o.rookImage, o.securityAccount, serverVersion, ownerInfo) - return nil -} - -// getDeploymentOwnerReference returns an OwnerReference to the rook-ceph-operator deployment -func getDeploymentOwnerReference(clientset kubernetes.Interface, namespace string) (*metav1.OwnerReference, error) { - ctx := context.TODO() - var deploymentRef *metav1.OwnerReference - podName := os.Getenv(k8sutil.PodNameEnvVar) - pod, err := clientset.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{}) + err = peermap.CreateOrUpdateConfig(o.context, &peermap.PeerIDMappings{}) if err != nil { - return nil, errors.Wrapf(err, "could not find pod %q to find deployment owner reference", podName) + return errors.Wrap(err, "failed to create pool ID mapping config map") } - for _, podOwner := range pod.OwnerReferences { - if podOwner.Kind == "ReplicaSet" { - replicaset, err := clientset.AppsV1().ReplicaSets(namespace).Get(ctx, podOwner.Name, metav1.GetOptions{}) - if err != nil { - return nil, errors.Wrapf(err, "could not find replicaset %q to find deployment owner reference", podOwner.Name) - } - for _, replicasetOwner := range replicaset.OwnerReferences { - if replicasetOwner.Kind == "Deployment" { - localreplicasetOwner := replicasetOwner - deploymentRef = &localreplicasetOwner - } - } - } - } - if deploymentRef == nil { - return nil, errors.New("could not find owner reference for rook-ceph deployment") - } - return deploymentRef, nil + + go csi.ValidateAndConfigureDrivers(o.context, o.operatorNamespace, o.rookImage, o.securityAccount, serverVersion, ownerInfo) + return nil } diff --git a/pkg/operator/ceph/pool/controller.go b/pkg/operator/ceph/pool/controller.go index 943bccc2ae7b..ae4e1564f1ee 100644 --- a/pkg/operator/ceph/pool/controller.go +++ b/pkg/operator/ceph/pool/controller.go @@ -31,7 +31,10 @@ import ( "github.com/rook/rook/pkg/clusterd" "github.com/rook/rook/pkg/operator/ceph/cluster/mgr" "github.com/rook/rook/pkg/operator/ceph/cluster/mon" + "github.com/rook/rook/pkg/operator/ceph/config" opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" + "github.com/rook/rook/pkg/operator/ceph/csi/peermap" + "github.com/rook/rook/pkg/operator/k8sutil" corev1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -165,7 +168,7 @@ func (r *ReconcileCephBlockPool) reconcile(request reconcile.Request) (reconcile } // Make sure a CephCluster is present otherwise do nothing - cephCluster, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName) + cephCluster, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, request.NamespacedName, controllerName) if !isReadyToReconcile { // This handles the case where the Ceph Cluster is gone and we want to delete that CR // We skip the deletePool() function since everything is gone already @@ -192,6 +195,7 @@ func (r *ReconcileCephBlockPool) reconcile(request reconcile.Request) (reconcile return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to populate cluster info") } r.clusterInfo = clusterInfo + r.clusterInfo.NetworkSpec = cephCluster.Spec.Network // Initialize the channel for this pool // This allows us to track multiple CephBlockPool in the 
same namespace @@ -282,7 +286,7 @@ func (r *ReconcileCephBlockPool) reconcile(request reconcile.Request) (reconcile logger.Debug("reconciling create rbd mirror peer configuration") if cephBlockPool.Spec.Mirroring.Enabled { // Always create a bootstrap peer token in case another cluster wants to add us as a peer - reconcileResponse, err = opcontroller.CreateBootstrapPeerSecret(r.context, clusterInfo, cephBlockPool, request.NamespacedName, r.scheme) + reconcileResponse, err = opcontroller.CreateBootstrapPeerSecret(r.context, clusterInfo, cephBlockPool, k8sutil.NewOwnerInfo(cephBlockPool, r.scheme)) if err != nil { updateStatus(r.client, request.NamespacedName, cephv1.ConditionFailure, nil) return reconcileResponse, errors.Wrapf(err, "failed to create rbd-mirror bootstrap peer for pool %q.", cephBlockPool.GetName()) @@ -301,6 +305,19 @@ func (r *ReconcileCephBlockPool) reconcile(request reconcile.Request) (reconcile } } + // Add bootstrap peer if any + logger.Debug("reconciling ceph bootstrap peers import") + reconcileResponse, err = r.reconcileAddBoostrapPeer(cephBlockPool, request.NamespacedName) + if err != nil { + return reconcileResponse, errors.Wrap(err, "failed to add ceph rbd mirror peer") + } + + // ReconcilePoolIDMap updates the `rook-ceph-csi-mapping-config` with local and peer cluster pool ID map + err = peermap.ReconcilePoolIDMap(r.context, r.clusterInfo, cephBlockPool) + if err != nil { + return reconcileResponse, errors.Wrapf(err, "failed to update pool ID mapping config for the pool %q", cephBlockPool.Name) + } + // Set Ready status, we are done reconciling updateStatus(r.client, request.NamespacedName, cephv1.ConditionReady, opcontroller.GenerateStatusInfo(cephBlockPool)) @@ -340,6 +357,14 @@ func createPool(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, return errors.Wrapf(err, "failed to create pool %q", p.Name) } + logger.Infof("initializing pool %q", p.Name) + args := []string{"pool", "init", p.Name} + output, err := cephclient.NewRBDCommand(context, clusterInfo, args).Run() + if err != nil { + return errors.Wrapf(err, "failed to initialize pool %q. 
%s", p.Name, string(output)) + } + logger.Infof("successfully initialized pool %q", p.Name) + return nil } @@ -379,7 +404,12 @@ func configureRBDStats(clusterContext *clusterd.Context, clusterInfo *cephclient } } logger.Debugf("RBD per-image IO statistics will be collected for pools: %v", enableStatsForPools) - _, err = cephclient.SetConfig(clusterContext, clusterInfo, "mgr.", "mgr/prometheus/rbd_stats_pools", strings.Join(enableStatsForPools, ","), false) + monStore := config.GetMonStore(clusterContext, clusterInfo) + if len(enableStatsForPools) == 0 { + err = monStore.Delete("mgr.", "mgr/prometheus/rbd_stats_pools") + } else { + err = monStore.Set("mgr.", "mgr/prometheus/rbd_stats_pools", strings.Join(enableStatsForPools, ",")) + } if err != nil { return errors.Wrapf(err, "failed to enable rbd_stats_pools") } diff --git a/pkg/operator/ceph/pool/controller_test.go b/pkg/operator/ceph/pool/controller_test.go index 6f8046525c86..380479ceea3f 100644 --- a/pkg/operator/ceph/pool/controller_test.go +++ b/pkg/operator/ceph/pool/controller_test.go @@ -20,6 +20,7 @@ import ( "context" "os" "testing" + "time" "github.com/coreos/pkg/capnslog" "github.com/pkg/errors" @@ -33,6 +34,8 @@ import ( exectest "github.com/rook/rook/pkg/util/exec/test" "github.com/stretchr/testify/assert" "github.com/tevino/abool" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -50,6 +53,9 @@ func TestCreatePool(t *testing.T) { if command == "ceph" && args[1] == "erasure-code-profile" { return `{"k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}`, nil } + if command == "rbd" { + assert.Equal(t, []string{"pool", "init", "mypool"}, args[0:3]) + } return "", nil }, } @@ -147,6 +153,9 @@ func TestCephBlockPoolController(t *testing.T) { Replicated: cephv1.ReplicatedSpec{ Size: replicas, }, + Mirroring: cephv1.MirroringSpec{ + Peers: &cephv1.MirroringPeerSpec{}, + }, StatusCheck: cephv1.MirrorHealthCheckSpec{ Mirror: cephv1.HealthCheckSpec{ Disabled: true, @@ -344,7 +353,44 @@ func TestCephBlockPoolController(t *testing.T) { blockPoolChannels: make(map[string]*blockPoolHealth), } + os.Setenv("POD_NAME", "test") + defer os.Setenv("POD_NAME", "") + os.Setenv("POD_NAMESPACE", namespace) + defer os.Setenv("POD_NAMESPACE", "") + p := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: namespace, + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: "testReplicaSet", + }, + }, + }, + } + // Create fake pod + _, err = r.context.Clientset.CoreV1().Pods(namespace).Create(context.TODO(), p, metav1.CreateOptions{}) + assert.NoError(t, err) + + replicaSet := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testReplicaSet", + Namespace: namespace, + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Deployment", + }, + }, + }, + } + + // Create fake replicaset + _, err = r.context.Clientset.AppsV1().ReplicaSets(namespace).Create(context.TODO(), replicaSet, metav1.CreateOptions{}) + assert.NoError(t, err) + pool.Spec.Mirroring.Mode = "image" + pool.Spec.Mirroring.Peers.SecretNames = []string{} err = r.client.Update(context.TODO(), pool) assert.NoError(t, err) for i := 0; i < 5; i++ { @@ -370,7 +416,47 @@ func TestCephBlockPoolController(t *testing.T) { } // - // TEST 6: Mirroring disabled + // TEST 6: Import peer token + + // Create a fake client to mock API calls. 
+ cl = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() + + // Create a ReconcileCephBlockPool object with the scheme and fake client. + r = &ReconcileCephBlockPool{ + client: cl, + scheme: s, + context: c, + blockPoolChannels: make(map[string]*blockPoolHealth), + } + + peerSecretName := "peer-secret" + pool.Spec.Mirroring.Peers.SecretNames = []string{peerSecretName} + err = r.client.Update(context.TODO(), pool) + assert.NoError(t, err) + res, err = r.Reconcile(ctx, req) + // assert reconcile failure because peer token secert was not created + assert.Error(t, err) + assert.True(t, res.Requeue) + + bootstrapPeerToken := `eyJmc2lkIjoiYzZiMDg3ZjItNzgyOS00ZGJiLWJjZmMtNTNkYzM0ZTBiMzVkIiwiY2xpZW50X2lkIjoicmJkLW1pcnJvci1wZWVyIiwia2V5IjoiQVFBV1lsWmZVQ1Q2RGhBQVBtVnAwbGtubDA5YVZWS3lyRVV1NEE9PSIsIm1vbl9ob3N0IjoiW3YyOjE5Mi4xNjguMTExLjEwOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTA6Njc4OV0sW3YyOjE5Mi4xNjguMTExLjEyOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTI6Njc4OV0sW3YyOjE5Mi4xNjguMTExLjExOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTE6Njc4OV0ifQ==` //nolint:gosec // This is just a var name, not a real token + peerSecret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: peerSecretName, + Namespace: namespace, + }, + Data: map[string][]byte{"token": []byte(bootstrapPeerToken), "pool": []byte("goo")}, + Type: k8sutil.RookType, + } + _, err = c.Clientset.CoreV1().Secrets(namespace).Create(ctx, peerSecret, metav1.CreateOptions{}) + assert.NoError(t, err) + res, err = r.Reconcile(ctx, req) + assert.NoError(t, err) + assert.False(t, res.Requeue) + err = r.client.Get(context.TODO(), req.NamespacedName, pool) + assert.NoError(t, err) + + // + // TEST 7: Mirroring disabled r = &ReconcileCephBlockPool{ client: cl, scheme: s, @@ -399,15 +485,18 @@ func TestConfigureRBDStats(t *testing.T) { ) executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { + MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, args ...string) (string, error) { logger.Infof("Command: %s %v", command, args) - switch { - case args[0] == "config" && args[1] == "set" && args[2] == "mgr." && args[3] == "mgr/prometheus/rbd_stats_pools" && args[4] != "": - return "", nil - case args[0] == "config" && args[1] == "get" && args[2] == "mgr." && args[3] == "mgr/prometheus/rbd_stats_pools": - return "", nil - case args[0] == "config" && args[1] == "rm" && args[2] == "mgr." && args[3] == "mgr/prometheus/rbd_stats_pools": - return "", nil + if args[0] == "config" && args[2] == "mgr." && args[3] == "mgr/prometheus/rbd_stats_pools" { + if args[1] == "set" && args[4] != "" { + return "", nil + } + if args[1] == "get" { + return "", nil + } + if args[1] == "rm" { + return "", nil + } } return "", errors.Errorf("unexpected arguments %q", args) }, @@ -463,9 +552,9 @@ func TestConfigureRBDStats(t *testing.T) { // Case 5: Two CephBlockPools with EnableRBDStats:false & EnableRBDStats:true. 
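The bootstrapPeerToken fixture above is base64-encoded JSON describing the peer cluster. Decoding it shows the fields the rbd-mirror bootstrap import relies on (fsid, client_id, key, mon_host). The struct below is purely illustrative, reuses the test fixture rather than a real cluster's token, and is not how Rook itself parses the token.

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// peerToken mirrors the fields visible in the decoded test fixture; the
// struct name itself is only illustrative.
type peerToken struct {
	FSID     string `json:"fsid"`
	ClientID string `json:"client_id"`
	Key      string `json:"key"`
	MonHost  string `json:"mon_host"`
}

func main() {
	// The bootstrapPeerToken fixture from the test above (not a real cluster's token).
	token := "eyJmc2lkIjoiYzZiMDg3ZjItNzgyOS00ZGJiLWJjZmMtNTNkYzM0ZTBiMzVkIiwiY2xpZW50X2lkIjoicmJkLW1pcnJvci1wZWVyIiwia2V5IjoiQVFBV1lsWmZVQ1Q2RGhBQVBtVnAwbGtubDA5YVZWS3lyRVV1NEE9PSIsIm1vbl9ob3N0IjoiW3YyOjE5Mi4xNjguMTExLjEwOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTA6Njc4OV0sW3YyOjE5Mi4xNjguMTExLjEyOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTI6Njc4OV0sW3YyOjE5Mi4xNjguMTExLjExOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTE6Njc4OV0ifQ=="

	raw, err := base64.StdEncoding.DecodeString(token)
	if err != nil {
		fmt.Println("decode error:", err)
		return
	}
	var t peerToken
	if err := json.Unmarshal(raw, &t); err != nil {
		fmt.Println("unmarshal error:", err)
		return
	}
	fmt.Printf("peer fsid=%s client=%s mons=%s\n", t.FSID, t.ClientID, t.MonHost)
}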
// SetConfig returns an error context.Executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { + MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, args ...string) (string, error) { logger.Infof("Command: %s %v", command, args) - return "", errors.New("mock error to simulate failure of SetConfig() function") + return "", errors.New("mock error to simulate failure of mon store Set() function") }, } err = configureRBDStats(context, clusterInfo) diff --git a/pkg/operator/ceph/pool/peers.go b/pkg/operator/ceph/pool/peers.go new file mode 100644 index 000000000000..496a4bcca65e --- /dev/null +++ b/pkg/operator/ceph/pool/peers.go @@ -0,0 +1,62 @@ +/* +Copyright 2021 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pool + +import ( + "context" + + "github.com/pkg/errors" + cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + "github.com/rook/rook/pkg/daemon/ceph/client" + opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +func (r *ReconcileCephBlockPool) reconcileAddBoostrapPeer(pool *cephv1.CephBlockPool, + namespacedName types.NamespacedName) (reconcile.Result, error) { + + if pool.Spec.Mirroring.Peers == nil { + return reconcile.Result{}, nil + } + + // List all the peers secret, we can have more than one peer we might want to configure + // For each, get the Kubernetes Secret and import the "peer token" so that we can configure the mirroring + for _, peerSecret := range pool.Spec.Mirroring.Peers.SecretNames { + logger.Debugf("fetching bootstrap peer kubernetes secret %q", peerSecret) + s, err := r.context.Clientset.CoreV1().Secrets(r.clusterInfo.Namespace).Get(context.TODO(), peerSecret, metav1.GetOptions{}) + // We don't care about IsNotFound here, we still need to fail + if err != nil { + return opcontroller.ImmediateRetryResult, errors.Wrapf(err, "failed to fetch kubernetes secret %q bootstrap peer", peerSecret) + } + + // Validate peer secret content + err = opcontroller.ValidatePeerToken(pool, s.Data) + if err != nil { + return opcontroller.ImmediateRetryResult, errors.Wrapf(err, "failed to validate rbd-mirror bootstrap peer secret %q data", peerSecret) + } + + // Import bootstrap peer + err = client.ImportRBDMirrorBootstrapPeer(r.context, r.clusterInfo, pool.Name, string(s.Data["direction"]), s.Data["token"]) + if err != nil { + return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to import bootstrap peer token") + } + } + + return reconcile.Result{}, nil +} diff --git a/pkg/operator/ceph/pool/validate.go b/pkg/operator/ceph/pool/validate.go index f96864627602..44d96c113b65 100644 --- a/pkg/operator/ceph/pool/validate.go +++ b/pkg/operator/ceph/pool/validate.go @@ -139,11 +139,19 @@ func ValidatePoolSpec(context *clusterd.Context, clusterInfo *cephclient.Cluster // validate pool compression mode if 
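The new peers.go consumes Kubernetes Secrets listed in spec.mirroring.peers.secretNames: each one must carry at least a "token" entry, the test fixture also sets "pool", and an optional "direction" entry is passed through to ImportRBDMirrorBootstrapPeer. Below is a hedged sketch of wiring a peer secret to a mirrored pool; all names and the token value are placeholders, and the secret type (the test uses k8sutil.RookType) is noted only in a comment.

package main

import (
	"fmt"

	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	const ns = "rook-ceph" // placeholder namespace

	// Secret holding the bootstrap token exported by the peer cluster.
	// The controller test above additionally sets Type: k8sutil.RookType.
	peerSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "peer-secret", Namespace: ns},
		Data: map[string][]byte{
			"token": []byte("<bootstrap token from the peer cluster>"), // placeholder value
			"pool":  []byte("replicapool"),                             // peer-side pool, as in the test fixture
		},
	}

	// CephBlockPool referencing the secret so the reconciler imports the peer.
	pool := &cephv1.CephBlockPool{
		ObjectMeta: metav1.ObjectMeta{Name: "replicapool", Namespace: ns},
		Spec: cephv1.PoolSpec{
			Replicated: cephv1.ReplicatedSpec{Size: 3},
			Mirroring: cephv1.MirroringSpec{
				Enabled: true,
				Mode:    "image",
				Peers:   &cephv1.MirroringPeerSpec{SecretNames: []string{peerSecret.Name}},
			},
		},
	}
	fmt.Println(pool.Name, pool.Spec.Mirroring.Peers.SecretNames)
}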
specified if p.CompressionMode != "" { - switch p.CompressionMode { - case "none", "passive", "aggressive", "force": - break - default: - return errors.Errorf("unrecognized compression mode %q", p.CompressionMode) + logger.Warning("compressionMode is DEPRECATED, use Parameters instead") + } + + // Test the same for Parameters + if p.Parameters != nil { + compression, ok := p.Parameters[client.CompressionModeProperty] + if ok && compression != "" { + switch compression { + case "none", "passive", "aggressive", "force": + break + default: + return errors.Errorf("failed to validate pool spec unknown compression mode %q", compression) + } } } diff --git a/pkg/operator/ceph/pool/validate_test.go b/pkg/operator/ceph/pool/validate_test.go index 8bd1faf5b1cd..6674c201cc51 100644 --- a/pkg/operator/ceph/pool/validate_test.go +++ b/pkg/operator/ceph/pool/validate_test.go @@ -34,148 +34,170 @@ func TestValidatePool(t *testing.T) { clusterInfo := &cephclient.ClusterInfo{Namespace: "myns"} clusterSpec := &cephv1.ClusterSpec{} - // not specifying some replication or EC settings is fine - p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - err := ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Nil(t, err) + t.Run("not specifying some replication or EC settings is fine", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} + err := ValidatePool(context, clusterInfo, clusterSpec, &p) + assert.NoError(t, err) + }) - // must specify name - p = cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Namespace: clusterInfo.Namespace}} - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.NotNil(t, err) + t.Run("must specify name", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Namespace: clusterInfo.Namespace}} + err := ValidatePool(context, clusterInfo, clusterSpec, &p) + assert.Error(t, err) + }) - // must specify namespace - p = cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool"}} - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.NotNil(t, err) + t.Run("must specify namespace", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool"}} + err := ValidatePool(context, clusterInfo, clusterSpec, &p) + assert.Error(t, err) + }) - // must not specify both replication and EC settings - p = cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - p.Spec.Replicated.Size = 1 - p.Spec.Replicated.RequireSafeReplicaSize = false - p.Spec.ErasureCoded.CodingChunks = 2 - p.Spec.ErasureCoded.DataChunks = 3 - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.NotNil(t, err) + t.Run("must not specify both replication and EC settings", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} + p.Spec.Replicated.Size = 1 + p.Spec.Replicated.RequireSafeReplicaSize = false + p.Spec.ErasureCoded.CodingChunks = 2 + p.Spec.ErasureCoded.DataChunks = 3 + err := ValidatePool(context, clusterInfo, clusterSpec, &p) + assert.Error(t, err) + }) - // succeed with replication settings - p = cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - p.Spec.Replicated.Size = 1 - p.Spec.Replicated.RequireSafeReplicaSize = false - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Nil(t, err) + 
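The validate.go change above deprecates spec.compressionMode in favor of Parameters["compression_mode"], which still accepts none, passive, aggressive, or force. A small sketch of the preferred form, with the pool name and namespace as placeholders:

package main

import (
	"fmt"

	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pool := &cephv1.CephBlockPool{
		ObjectMeta: metav1.ObjectMeta{Name: "replicapool", Namespace: "rook-ceph"}, // placeholder names
		Spec: cephv1.PoolSpec{
			Replicated: cephv1.ReplicatedSpec{Size: 3},
			// Preferred over the deprecated spec.CompressionMode field; the
			// validator above accepts none, passive, aggressive or force here.
			Parameters: map[string]string{"compression_mode": "aggressive"},
		},
	}
	fmt.Println(pool.Spec.Parameters["compression_mode"])
}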
t.Run("succeed with replication settings", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} + p.Spec.Replicated.Size = 1 + p.Spec.Replicated.RequireSafeReplicaSize = false + err := ValidatePool(context, clusterInfo, clusterSpec, &p) + assert.NoError(t, err) + }) - // size is 1 and RequireSafeReplicaSize is true - p = cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - p.Spec.Replicated.Size = 1 - p.Spec.Replicated.RequireSafeReplicaSize = true - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Error(t, err) - - // succeed with ec settings - p = cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - p.Spec.ErasureCoded.CodingChunks = 1 - p.Spec.ErasureCoded.DataChunks = 2 - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Nil(t, err) + t.Run("size is 1 and RequireSafeReplicaSize is true", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} + p.Spec.Replicated.Size = 1 + p.Spec.Replicated.RequireSafeReplicaSize = true + err := ValidatePool(context, clusterInfo, clusterSpec, &p) + assert.Error(t, err) + }) - // Tests with various compression modes - // succeed with compression mode "none" - p = cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - p.Spec.Replicated.Size = 1 - p.Spec.Replicated.RequireSafeReplicaSize = false - p.Spec.CompressionMode = "none" - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Nil(t, err) + t.Run("succeed with ec settings", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} + p.Spec.ErasureCoded.CodingChunks = 1 + p.Spec.ErasureCoded.DataChunks = 2 + err := ValidatePool(context, clusterInfo, clusterSpec, &p) + assert.NoError(t, err) + }) - // succeed with compression mode "aggressive" - p = cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - p.Spec.Replicated.Size = 1 - p.Spec.Replicated.RequireSafeReplicaSize = false - p.Spec.CompressionMode = "aggressive" - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Nil(t, err) + t.Run("fail Parameters['compression_mode'] is unknown", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} + p.Spec.Replicated.Size = 1 + p.Spec.Replicated.RequireSafeReplicaSize = false + p.Spec.Parameters = map[string]string{"compression_mode": "foo"} + err := ValidatePool(context, clusterInfo, clusterSpec, &p) + assert.Error(t, err) + assert.EqualError(t, err, "failed to validate pool spec unknown compression mode \"foo\"") + assert.Equal(t, "foo", p.Spec.Parameters["compression_mode"]) + }) - // fail with compression mode "unsupported" - p = cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - p.Spec.Replicated.Size = 1 - p.Spec.Replicated.RequireSafeReplicaSize = false - p.Spec.CompressionMode = "unsupported" - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Error(t, err) + t.Run("success Parameters['compression_mode'] is known", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} + p.Spec.Replicated.Size = 1 + 
p.Spec.Replicated.RequireSafeReplicaSize = false + p.Spec.Parameters = map[string]string{"compression_mode": "aggressive"} + err := ValidatePool(context, clusterInfo, clusterSpec, &p) + assert.NoError(t, err) + }) - // fail since replica size is lower than ReplicasPerFailureDomain - p.Spec.Replicated.ReplicasPerFailureDomain = 2 - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Error(t, err) + t.Run("fail since replica size is lower than ReplicasPerFailureDomain", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} + p.Spec.Replicated.Size = 1 + p.Spec.Replicated.ReplicasPerFailureDomain = 2 + err := ValidatePool(context, clusterInfo, clusterSpec, &p) + assert.Error(t, err) + }) - // fail since replica size is equal than ReplicasPerFailureDomain - p.Spec.Replicated.Size = 2 - p.Spec.Replicated.ReplicasPerFailureDomain = 2 - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Error(t, err) + t.Run("fail since replica size is equal than ReplicasPerFailureDomain", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} + p.Spec.Replicated.Size = 2 + p.Spec.Replicated.ReplicasPerFailureDomain = 2 + err := ValidatePool(context, clusterInfo, clusterSpec, &p) + assert.Error(t, err) + }) - // fail since ReplicasPerFailureDomain is not a power of 2 - p.Spec.Replicated.Size = 4 - p.Spec.Replicated.ReplicasPerFailureDomain = 3 - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Error(t, err) + t.Run("fail since ReplicasPerFailureDomain is not a power of 2", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} + p.Spec.Replicated.Size = 4 + p.Spec.Replicated.ReplicasPerFailureDomain = 3 + err := ValidatePool(context, clusterInfo, clusterSpec, &p) + assert.Error(t, err) + }) - // fail since ReplicasPerFailureDomain is not a power of 2 - p.Spec.Replicated.Size = 4 - p.Spec.Replicated.ReplicasPerFailureDomain = 5 - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Error(t, err) - - // Failure the sub domain does not exist - p.Spec.Replicated.SubFailureDomain = "dummy" - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Error(t, err) - - // succeed with ec pool and valid compression mode - p = cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - p.Spec.ErasureCoded.CodingChunks = 1 - p.Spec.ErasureCoded.DataChunks = 2 - p.Spec.CompressionMode = "passive" - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Nil(t, err) + t.Run("fail since ReplicasPerFailureDomain is not a power of 2", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} + p.Spec.Replicated.Size = 4 + p.Spec.Replicated.ReplicasPerFailureDomain = 5 + err := ValidatePool(context, clusterInfo, clusterSpec, &p) + assert.Error(t, err) + }) - // Add mirror test mode - { + t.Run("failure the sub domain does not exist", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} + p.Spec.Replicated.SubFailureDomain = "dummy" + err := ValidatePool(context, clusterInfo, clusterSpec, &p) + assert.Error(t, err) + }) + + t.Run("succeed with ec pool and valid compression mode", func(t *testing.T) { + p := 
cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} + p.Spec.ErasureCoded.CodingChunks = 1 + p.Spec.ErasureCoded.DataChunks = 2 + p.Spec.CompressionMode = "passive" + err := ValidatePool(context, clusterInfo, clusterSpec, &p) + assert.NoError(t, err) + }) + + t.Run("fail unrecognized mirroring mode", func(t *testing.T) { p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} p.Spec.Mirroring.Enabled = true p.Spec.Mirroring.Mode = "foo" - err = ValidatePool(context, clusterInfo, clusterSpec, &p) + err := ValidatePool(context, clusterInfo, clusterSpec, &p) assert.Error(t, err) assert.EqualError(t, err, "unrecognized mirroring mode \"foo\". only 'image and 'pool' are supported") + }) - // Success mode is known + t.Run("success known mirroring mode", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} + p.Spec.Mirroring.Enabled = true p.Spec.Mirroring.Mode = "pool" - err = ValidatePool(context, clusterInfo, clusterSpec, &p) + err := ValidatePool(context, clusterInfo, clusterSpec, &p) assert.NoError(t, err) + }) - // Error no interval specified + t.Run("fail mirroring mode no interval specified", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} + p.Spec.Mirroring.Enabled = true + p.Spec.Mirroring.Mode = "pool" p.Spec.Mirroring.SnapshotSchedules = []cephv1.SnapshotScheduleSpec{{StartTime: "14:00:00-05:00"}} - err = ValidatePool(context, clusterInfo, clusterSpec, &p) + err := ValidatePool(context, clusterInfo, clusterSpec, &p) assert.Error(t, err) assert.EqualError(t, err, "schedule interval cannot be empty if start time is specified") + }) - // Success we have an interval + t.Run("fail mirroring mode we have a snap interval", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} + p.Spec.Mirroring.Enabled = true + p.Spec.Mirroring.Mode = "pool" p.Spec.Mirroring.SnapshotSchedules = []cephv1.SnapshotScheduleSpec{{Interval: "24h"}} - err = ValidatePool(context, clusterInfo, clusterSpec, &p) + err := ValidatePool(context, clusterInfo, clusterSpec, &p) assert.NoError(t, err) - } + }) - // Failure and subfailure domains - { + t.Run("failure and subfailure domains", func(t *testing.T) { p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} p.Spec.FailureDomain = "host" p.Spec.Replicated.SubFailureDomain = "host" - err = ValidatePool(context, clusterInfo, clusterSpec, &p) + err := ValidatePool(context, clusterInfo, clusterSpec, &p) assert.Error(t, err) assert.EqualError(t, err, "failure and subfailure domain cannot be identical") - } - + }) } func TestValidateCrushProperties(t *testing.T) { diff --git a/pkg/operator/ceph/version/version_test.go b/pkg/operator/ceph/version/version_test.go index d6706752cba6..a97b2eb7637a 100644 --- a/pkg/operator/ceph/version/version_test.go +++ b/pkg/operator/ceph/version/version_test.go @@ -53,24 +53,24 @@ func extractVersionHelper(t *testing.T, text string, major, minor, extra, build func TestExtractVersion(t *testing.T) { // release build - v0c := "ceph version 13.2.6 (ae699615bac534ea496ee965ac6192cb7e0e07c1) mimic (stable)" + v0c := "ceph version 16.2.6 (ae699615bac534ea496ee965ac6192cb7e0e07c1) pacific (stable)" v0d := ` root@7a97f5a78bc6:/# ceph --version -ceph version 13.2.6 
(ae699615bac534ea496ee965ac6192cb7e0e07c1) mimic (stable) +ceph version 16.2.6 (ae699615bac534ea496ee965ac6192cb7e0e07c1) pacific (stable) ` - extractVersionHelper(t, v0c, 13, 2, 6, 0, "ae699615bac534ea496ee965ac6192cb7e0e07c1") - extractVersionHelper(t, v0d, 13, 2, 6, 0, "ae699615bac534ea496ee965ac6192cb7e0e07c1") + extractVersionHelper(t, v0c, 16, 2, 6, 0, "ae699615bac534ea496ee965ac6192cb7e0e07c1") + extractVersionHelper(t, v0d, 16, 2, 6, 0, "ae699615bac534ea496ee965ac6192cb7e0e07c1") // development build - v1c := "ceph version 14.1.33-403-g7ba6bece41 (7ba6bece4187eda5d05a9b84211fe6ba8dd287bd) nautilus (rc)" + v1c := "ceph version 16.1.33-403-g7ba6bece41 (7ba6bece4187eda5d05a9b84211fe6ba8dd287bd) pacific (rc)" v1d := ` bin/ceph --version *** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH *** -ceph version 14.1.33-403-g7ba6bece41 +ceph version 16.1.33-403-g7ba6bece41 (7ba6bece4187eda5d05a9b84211fe6ba8dd287bd) nautilus (rc) ` - extractVersionHelper(t, v1c, 14, 1, 33, 403, "7ba6bece4187eda5d05a9b84211fe6ba8dd287bd") - extractVersionHelper(t, v1d, 14, 1, 33, 403, "7ba6bece4187eda5d05a9b84211fe6ba8dd287bd") + extractVersionHelper(t, v1c, 16, 1, 33, 403, "7ba6bece4187eda5d05a9b84211fe6ba8dd287bd") + extractVersionHelper(t, v1d, 16, 1, 33, 403, "7ba6bece4187eda5d05a9b84211fe6ba8dd287bd") // build without git version info. it is possible to build the ceph tree // without a version number, but none of the container builds do this. @@ -78,7 +78,7 @@ ceph version 14.1.33-403-g7ba6bece41 // explicitly adding fine-grained versioning to avoid issues with // release granularity. adding the reverse name-to-version is easy // enough if this ever becomes a need. - v2c := "ceph version Development (no_version) nautilus (rc)" + v2c := "ceph version Development (no_version) pacific (rc)" v2d := ` bin/ceph --version *** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH *** diff --git a/pkg/operator/k8sutil/deployment.go b/pkg/operator/k8sutil/deployment.go index f84ed8ad0393..34d62c0bb4c5 100644 --- a/pkg/operator/k8sutil/deployment.go +++ b/pkg/operator/k8sutil/deployment.go @@ -394,6 +394,34 @@ func DeleteDeployment(clientset kubernetes.Interface, namespace, name string) er return deleteResourceAndWait(namespace, name, "deployment", deleteAction, getAction) } +// GetDeploymentOwnerReference returns an OwnerReference to the deployment that is running the given pod name +func GetDeploymentOwnerReference(clientset kubernetes.Interface, podName, namespace string) (*metav1.OwnerReference, error) { + ctx := context.TODO() + var deploymentRef *metav1.OwnerReference + pod, err := clientset.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{}) + if err != nil { + return nil, errors.Wrapf(err, "could not find pod %q in namespace %q to find deployment owner reference", podName, namespace) + } + for _, podOwner := range pod.OwnerReferences { + if podOwner.Kind == "ReplicaSet" { + replicaset, err := clientset.AppsV1().ReplicaSets(namespace).Get(ctx, podOwner.Name, metav1.GetOptions{}) + if err != nil { + return nil, errors.Wrapf(err, "could not find replicaset %q in namespace %q to find deployment owner reference", podOwner.Name, namespace) + } + for _, replicasetOwner := range replicaset.OwnerReferences { + if replicasetOwner.Kind == "Deployment" { + localreplicasetOwner := replicasetOwner + deploymentRef = &localreplicasetOwner + } + } + } + } + if deploymentRef == nil { + return nil, errors.New("could not find owner reference for rook-ceph deployment") + } + return deploymentRef, 
nil +} + // WaitForDeploymentImage waits for all deployments with the given labels are running. // WARNING:This is currently only useful for testing! func WaitForDeploymentImage(clientset kubernetes.Interface, namespace, label, container string, initContainer bool, desiredImage string) error { diff --git a/pkg/operator/k8sutil/job.go b/pkg/operator/k8sutil/job.go index acfb27045ca0..ad0e50686227 100644 --- a/pkg/operator/k8sutil/job.go +++ b/pkg/operator/k8sutil/job.go @@ -51,7 +51,7 @@ func RunReplaceableJob(clientset kubernetes.Interface, job *batch.Job, deleteIfF logger.Infof("Removing previous job %s to start a new one", job.Name) err := DeleteBatchJob(clientset, job.Namespace, existingJob.Name, true) if err != nil { - logger.Warningf("failed to remove job %s. %+v", job.Name, err) + return fmt.Errorf("failed to remove job %s. %+v", job.Name, err) } } @@ -103,8 +103,10 @@ func DeleteBatchJob(clientset kubernetes.Interface, namespace, name string, wait return nil } - retries := 20 - sleepInterval := 2 * time.Second + // Retry for the job to be deleted for 90s. A pod can easily take 60s to timeout before + // deletion so we add some buffer to that time. + retries := 30 + sleepInterval := 3 * time.Second for i := 0; i < retries; i++ { _, err := clientset.BatchV1().Jobs(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil && errors.IsNotFound(err) { diff --git a/pkg/operator/k8sutil/prometheus.go b/pkg/operator/k8sutil/prometheus.go index cf27f074f013..b4aafbd77b5c 100644 --- a/pkg/operator/k8sutil/prometheus.go +++ b/pkg/operator/k8sutil/prometheus.go @@ -80,6 +80,7 @@ func CreateOrUpdateServiceMonitor(serviceMonitorDefinition *monitoringv1.Service return nil, fmt.Errorf("failed to retrieve servicemonitor. %v", err) } oldSm.Spec = serviceMonitorDefinition.Spec + oldSm.ObjectMeta.Labels = serviceMonitorDefinition.ObjectMeta.Labels sm, err := client.MonitoringV1().ServiceMonitors(namespace).Update(ctx, oldSm, metav1.UpdateOptions{}) if err != nil { return nil, fmt.Errorf("failed to update servicemonitor. %v", err) @@ -123,6 +124,7 @@ func CreateOrUpdatePrometheusRule(prometheusRule *monitoringv1.PrometheusRule) ( return nil, fmt.Errorf("failed to get prometheusRule object. %v", err) } promRule.Spec = prometheusRule.Spec + promRule.ObjectMeta.Labels = prometheusRule.ObjectMeta.Labels _, err = client.MonitoringV1().PrometheusRules(namespace).Update(ctx, promRule, metav1.UpdateOptions{}) if err != nil { return nil, fmt.Errorf("failed to update prometheusRule. 
%v", err) diff --git a/pkg/operator/k8sutil/prometheus_test.go b/pkg/operator/k8sutil/prometheus_test.go index db9ad42cb439..5910bb5a6081 100644 --- a/pkg/operator/k8sutil/prometheus_test.go +++ b/pkg/operator/k8sutil/prometheus_test.go @@ -32,6 +32,7 @@ func TestGetServiceMonitor(t *testing.T) { assert.Nil(t, err) assert.Equal(t, "rook-ceph-mgr", servicemonitor.GetName()) assert.Equal(t, "rook-ceph", servicemonitor.GetNamespace()) + assert.NotNil(t, servicemonitor.GetLabels()) assert.NotNil(t, servicemonitor.Spec.NamespaceSelector.MatchNames) assert.NotNil(t, servicemonitor.Spec.Endpoints) } diff --git a/pkg/operator/k8sutil/resources.go b/pkg/operator/k8sutil/resources.go index 87ec21bf30d1..18f5e9040234 100644 --- a/pkg/operator/k8sutil/resources.go +++ b/pkg/operator/k8sutil/resources.go @@ -26,6 +26,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" ) @@ -85,11 +86,32 @@ func (info *OwnerInfo) SetOwnerReference(object metav1.Object) error { if err != nil { return err } - ownerRefs := append(object.GetOwnerReferences(), *info.ownerRef) + ownerRefs := object.GetOwnerReferences() + for _, v := range ownerRefs { + if referSameObject(v, *info.ownerRef) { + return nil + } + } + ownerRefs = append(ownerRefs, *info.ownerRef) object.SetOwnerReferences(ownerRefs) return nil } +// The original code is in https://github.com/kubernetes-sigs/controller-runtime/blob/a905949b9040084f0c6d2a27ec70e77c3c5c0931/pkg/controller/controllerutil/controllerutil.go#L160 +func referSameObject(a, b metav1.OwnerReference) bool { + groupVersionA, err := schema.ParseGroupVersion(a.APIVersion) + if err != nil { + return false + } + + groupVersionB, err := schema.ParseGroupVersion(b.APIVersion) + if err != nil { + return false + } + + return groupVersionA.Group == groupVersionB.Group && a.Kind == b.Kind && a.Name == b.Name +} + // SetControllerReference set the controller reference of object func (info *OwnerInfo) SetControllerReference(object metav1.Object) error { if info.owner != nil { diff --git a/pkg/operator/k8sutil/resources_test.go b/pkg/operator/k8sutil/resources_test.go index fe109fbe17c6..fc47c4fed186 100644 --- a/pkg/operator/k8sutil/resources_test.go +++ b/pkg/operator/k8sutil/resources_test.go @@ -142,3 +142,24 @@ func TestValidateController(t *testing.T) { err = newOwnerInfo.validateController(object) assert.Error(t, err) } + +func TestSetOwnerReference(t *testing.T) { + info := OwnerInfo{ + ownerRef: &metav1.OwnerReference{Name: "test-id"}, + } + object := v1.ConfigMap{} + err := info.SetOwnerReference(&object) + assert.NoError(t, err) + assert.Equal(t, object.GetOwnerReferences(), []metav1.OwnerReference{*info.ownerRef}) + + err = info.SetOwnerReference(&object) + assert.NoError(t, err) + assert.Equal(t, object.GetOwnerReferences(), []metav1.OwnerReference{*info.ownerRef}) + + info2 := OwnerInfo{ + ownerRef: &metav1.OwnerReference{Name: "test-id-2"}, + } + err = info2.SetOwnerReference(&object) + assert.NoError(t, err) + assert.Equal(t, object.GetOwnerReferences(), []metav1.OwnerReference{*info.ownerRef, *info2.ownerRef}) +} diff --git a/pkg/operator/nfs/controller.go b/pkg/operator/nfs/controller.go deleted file mode 100644 index 68a7c42c4437..000000000000 --- a/pkg/operator/nfs/controller.go +++ /dev/null @@ -1,323 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nfs - -import ( - "context" - "fmt" - "path" - "strings" - "time" - - nfsv1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/k8sutil" - - "github.com/coreos/pkg/capnslog" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/tools/record" - "k8s.io/utils/pointer" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -const ( - nfsConfigMapPath = "/nfs-ganesha/config" - nfsPort = 2049 - rpcPort = 111 -) - -type NFSServerReconciler struct { - client.Client - Context *clusterd.Context - Scheme *runtime.Scheme - Log *capnslog.PackageLogger - Recorder record.EventRecorder -} - -func (r *NFSServerReconciler) Reconcile(context context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { - - instance := &nfsv1alpha1.NFSServer{} - if err := r.Client.Get(context, req.NamespacedName, instance); err != nil { - if errors.IsNotFound(err) { - return reconcile.Result{}, nil - } - - return reconcile.Result{}, err - } - - // Initialize patcher utility and store the initial cr object state to be compare later. - patcher, err := k8sutil.NewPatcher(instance, r.Client) - if err != nil { - return reconcile.Result{}, err - } - - defer func() { - // Always patch the cr object if any changes at the end of each reconciliation. - if err := patcher.Patch(context, instance); err != nil && reterr == nil { - reterr = err - } - }() - - // Add Finalizer if not present - controllerutil.AddFinalizer(instance, nfsv1alpha1.Finalizer) - - // Handle for deletion. Just remove finalizer - if !instance.DeletionTimestamp.IsZero() { - r.Log.Infof("Deleting NFSServer %s in %s namespace", instance.Name, instance.Namespace) - - // no operation since we don't need do anything when nfsserver deleted. - controllerutil.RemoveFinalizer(instance, nfsv1alpha1.Finalizer) - } - - // Check status state. if it's empty then initialize it - // otherwise if has error state then skip reconciliation to prevent requeue on error. - switch instance.Status.State { - case "": - instance.Status.State = nfsv1alpha1.StateInitializing - r.Log.Info("Initialize status state") - return reconcile.Result{Requeue: true}, nil - case nfsv1alpha1.StateError: - r.Log.Info("Error state detected, skip reconciliation") - return reconcile.Result{Requeue: false}, nil - } - - // Validate cr spec and give warning event when validation fail. 
- if err := instance.ValidateSpec(); err != nil { - r.Recorder.Eventf(instance, corev1.EventTypeWarning, nfsv1alpha1.EventFailed, "Invalid NFSServer spec: %+v", err) - r.Log.Errorf("Invalid NFSServer spec: %+v", err) - instance.Status.State = nfsv1alpha1.StateError - return reconcile.Result{}, err - } - - if err := r.reconcileNFSServerConfig(context, instance); err != nil { - r.Recorder.Eventf(instance, corev1.EventTypeWarning, nfsv1alpha1.EventFailed, "Failed reconciling nfsserver config: %+v", err) - r.Log.Errorf("Error reconciling nfsserver config: %+v", err) - return reconcile.Result{}, err - } - - if err := r.reconcileNFSServer(context, instance); err != nil { - r.Recorder.Eventf(instance, corev1.EventTypeWarning, nfsv1alpha1.EventFailed, "Failed reconciling nfsserver: %+v", err) - r.Log.Errorf("Error reconciling nfsserver: %+v", err) - return reconcile.Result{}, err - } - - // Reconcile status state based on statefulset ready replicas. - sts := &appsv1.StatefulSet{} - if err := r.Client.Get(context, req.NamespacedName, sts); err != nil { - return reconcile.Result{}, client.IgnoreNotFound(err) - } - - switch int(sts.Status.ReadyReplicas) { - case instance.Spec.Replicas: - instance.Status.State = nfsv1alpha1.StateRunning - return reconcile.Result{}, nil - default: - instance.Status.State = nfsv1alpha1.StatePending - return reconcile.Result{RequeueAfter: 10 * time.Second}, nil - } -} - -func (r *NFSServerReconciler) reconcileNFSServerConfig(ctx context.Context, cr *nfsv1alpha1.NFSServer) error { - var exportsList []string - - id := 10 - for _, export := range cr.Spec.Exports { - claimName := export.PersistentVolumeClaim.ClaimName - var accessType string - // validateNFSServerSpec guarantees `access` will be one of these values at this point - switch strings.ToLower(export.Server.AccessMode) { - case "readwrite": - accessType = "RW" - case "readonly": - accessType = "RO" - case "none": - accessType = "None" - } - - nfsGaneshaConfig := ` -EXPORT { - Export_Id = ` + fmt.Sprintf("%v", id) + `; - Path = ` + path.Join("/", claimName) + `; - Pseudo = ` + path.Join("/", claimName) + `; - Protocols = 4; - Transports = TCP; - Sectype = sys; - Access_Type = ` + accessType + `; - Squash = ` + strings.ToLower(export.Server.Squash) + `; - FSAL { - Name = VFS; - } -}` - - exportsList = append(exportsList, nfsGaneshaConfig) - id++ - } - - nfsGaneshaAdditionalConfig := ` -NFS_Core_Param { - fsid_device = true; -} -` - - exportsList = append(exportsList, nfsGaneshaAdditionalConfig) - configdata := make(map[string]string) - configdata[cr.Name] = strings.Join(exportsList, "\n") - cm := newConfigMapForNFSServer(cr) - cmop, err := controllerutil.CreateOrUpdate(ctx, r.Client, cm, func() error { - if err := controllerutil.SetOwnerReference(cr, cm, r.Scheme); err != nil { - return err - } - - cm.Data = configdata - return nil - }) - - if err != nil { - return err - } - - r.Log.Info("Reconciling NFSServer ConfigMap", "Operation.Result ", cmop) - switch cmop { - case controllerutil.OperationResultCreated: - r.Recorder.Eventf(cr, corev1.EventTypeNormal, nfsv1alpha1.EventCreated, "%s nfs-server config configmap: %s", strings.Title(string(cmop)), cm.Name) - return nil - case controllerutil.OperationResultUpdated: - r.Recorder.Eventf(cr, corev1.EventTypeNormal, nfsv1alpha1.EventUpdated, "%s nfs-server config configmap: %s", strings.Title(string(cmop)), cm.Name) - return nil - default: - return nil - } -} - -func (r *NFSServerReconciler) reconcileNFSServer(ctx context.Context, cr *nfsv1alpha1.NFSServer) error { - svc 
:= newServiceForNFSServer(cr) - svcop, err := controllerutil.CreateOrUpdate(ctx, r.Client, svc, func() error { - if !svc.ObjectMeta.CreationTimestamp.IsZero() { - return nil - } - - if err := controllerutil.SetControllerReference(cr, svc, r.Scheme); err != nil { - return err - } - - return nil - }) - - if err != nil { - return err - } - - r.Log.Info("Reconciling NFSServer Service", "Operation.Result ", svcop) - switch svcop { - case controllerutil.OperationResultCreated: - r.Recorder.Eventf(cr, corev1.EventTypeNormal, nfsv1alpha1.EventCreated, "%s nfs-server service: %s", strings.Title(string(svcop)), svc.Name) - case controllerutil.OperationResultUpdated: - r.Recorder.Eventf(cr, corev1.EventTypeNormal, nfsv1alpha1.EventUpdated, "%s nfs-server service: %s", strings.Title(string(svcop)), svc.Name) - } - - sts, err := newStatefulSetForNFSServer(cr, r.Context.Clientset, ctx) - if err != nil { - return fmt.Errorf("unable to generate the NFS StatefulSet spec: %v", err) - } - - stsop, err := controllerutil.CreateOrUpdate(ctx, r.Client, sts, func() error { - if sts.ObjectMeta.CreationTimestamp.IsZero() { - sts.Spec.Selector = &metav1.LabelSelector{ - MatchLabels: newLabels(cr), - } - } - - if err := controllerutil.SetControllerReference(cr, sts, r.Scheme); err != nil { - return err - } - - volumes := []corev1.Volume{ - { - Name: cr.Name, - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: cr.Name, - }, - Items: []corev1.KeyToPath{ - { - Key: cr.Name, - Path: cr.Name, - }, - }, - DefaultMode: pointer.Int32Ptr(corev1.ConfigMapVolumeSourceDefaultMode), - }, - }, - }, - } - volumeMounts := []corev1.VolumeMount{ - { - Name: cr.Name, - MountPath: nfsConfigMapPath, - }, - } - for _, export := range cr.Spec.Exports { - shareName := export.Name - claimName := export.PersistentVolumeClaim.ClaimName - volumes = append(volumes, corev1.Volume{ - Name: shareName, - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: claimName, - }, - }, - }) - - volumeMounts = append(volumeMounts, corev1.VolumeMount{ - Name: shareName, - MountPath: path.Join("/", claimName), - }) - } - - sts.Spec.Template.Spec.Volumes = volumes - for i, container := range sts.Spec.Template.Spec.Containers { - if container.Name == "nfs-server" || container.Name == "nfs-provisioner" { - sts.Spec.Template.Spec.Containers[i].VolumeMounts = volumeMounts - } - } - - return nil - }) - - if err != nil { - return err - } - - r.Log.Info("Reconciling NFSServer StatefulSet", "Operation.Result ", stsop) - switch stsop { - case controllerutil.OperationResultCreated: - r.Recorder.Eventf(cr, corev1.EventTypeNormal, nfsv1alpha1.EventCreated, "%s nfs-server statefulset: %s", strings.Title(string(stsop)), sts.Name) - return nil - case controllerutil.OperationResultUpdated: - r.Recorder.Eventf(cr, corev1.EventTypeNormal, nfsv1alpha1.EventUpdated, "%s nfs-server statefulset: %s", strings.Title(string(stsop)), sts.Name) - return nil - default: - return nil - } -} diff --git a/pkg/operator/nfs/controller_test.go b/pkg/operator/nfs/controller_test.go deleted file mode 100644 index 67159efff1c0..000000000000 --- a/pkg/operator/nfs/controller_test.go +++ /dev/null @@ -1,309 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nfs - -import ( - "context" - "os" - "path" - "reflect" - "testing" - - nfsv1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1" - "github.com/rook/rook/pkg/clusterd" - "github.com/rook/rook/pkg/operator/k8sutil" - "github.com/rook/rook/pkg/operator/test" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - clientgoscheme "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/tools/record" - "k8s.io/utils/pointer" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -type resourceGenerator interface { - WithExports(exportName, serverAccessMode, serverSquashType, pvcName string) resourceGenerator - WithState(state nfsv1alpha1.NFSServerState) resourceGenerator - Generate() *nfsv1alpha1.NFSServer -} - -type resource struct { - name string - namespace string - exports []nfsv1alpha1.ExportsSpec - state nfsv1alpha1.NFSServerState -} - -func newCustomResource(namespacedName types.NamespacedName) resourceGenerator { - return &resource{ - name: namespacedName.Name, - namespace: namespacedName.Namespace, - } -} - -func (r *resource) WithExports(exportName, serverAccessMode, serverSquashType, pvcName string) resourceGenerator { - r.exports = append(r.exports, nfsv1alpha1.ExportsSpec{ - Name: exportName, - Server: nfsv1alpha1.ServerSpec{ - AccessMode: serverAccessMode, - Squash: serverSquashType, - }, - PersistentVolumeClaim: corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: pvcName, - }, - }) - - return r -} - -func (r *resource) WithState(state nfsv1alpha1.NFSServerState) resourceGenerator { - r.state = state - return r -} - -func (r *resource) Generate() *nfsv1alpha1.NFSServer { - return &nfsv1alpha1.NFSServer{ - ObjectMeta: metav1.ObjectMeta{ - Name: r.name, - Namespace: r.namespace, - }, - Spec: nfsv1alpha1.NFSServerSpec{ - Replicas: 1, - Exports: r.exports, - }, - Status: nfsv1alpha1.NFSServerStatus{ - State: r.state, - }, - } -} - -func TestNFSServerReconciler_Reconcile(t *testing.T) { - os.Setenv(k8sutil.PodNamespaceEnvVar, "rook-system") - defer os.Unsetenv(k8sutil.PodNamespaceEnvVar) - - os.Setenv(k8sutil.PodNameEnvVar, "rook-operator") - defer os.Unsetenv(k8sutil.PodNameEnvVar) - - ctx := context.TODO() - clientset := test.New(t, 3) - pod := corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "rook-operator", - Namespace: "rook-system", - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "mypodContainer", - Image: "rook/test", - }, - }, - }, - } - _, err := clientset.CoreV1().Pods(pod.Namespace).Create(ctx, &pod, metav1.CreateOptions{}) - if err != nil { - t.Errorf("Error creating the rook-operator pod: %v", err) - } - clusterdContext := &clusterd.Context{Clientset: clientset} - - expectedServerFunc := func(scheme *runtime.Scheme, cr *nfsv1alpha1.NFSServer) *appsv1.StatefulSet { - sts, err := newStatefulSetForNFSServer(cr, clientset, ctx) - if err != nil { - 
t.Errorf("Error creating the expectedServerFunc: %v", err) - return nil - } - sts.Spec.Selector = &metav1.LabelSelector{ - MatchLabels: newLabels(cr), - } - _ = controllerutil.SetControllerReference(cr, sts, scheme) - volumes := []corev1.Volume{ - { - Name: cr.Name, - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: cr.Name, - }, - Items: []corev1.KeyToPath{ - { - Key: cr.Name, - Path: cr.Name, - }, - }, - DefaultMode: pointer.Int32Ptr(corev1.ConfigMapVolumeSourceDefaultMode), - }, - }, - }, - } - volumeMounts := []corev1.VolumeMount{ - { - Name: cr.Name, - MountPath: nfsConfigMapPath, - }, - } - for _, export := range cr.Spec.Exports { - shareName := export.Name - claimName := export.PersistentVolumeClaim.ClaimName - volumes = append(volumes, corev1.Volume{ - Name: shareName, - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: claimName, - }, - }, - }) - - volumeMounts = append(volumeMounts, corev1.VolumeMount{ - Name: shareName, - MountPath: path.Join("/", claimName), - }) - } - sts.Status.ReadyReplicas = int32(cr.Spec.Replicas) - sts.Spec.Template.Spec.Volumes = volumes - for i, container := range sts.Spec.Template.Spec.Containers { - if container.Name == "nfs-server" || container.Name == "nfs-provisioner" { - sts.Spec.Template.Spec.Containers[i].VolumeMounts = volumeMounts - } - } - - return sts - } - - expectedServerServiceFunc := func(scheme *runtime.Scheme, cr *nfsv1alpha1.NFSServer) *corev1.Service { - svc := newServiceForNFSServer(cr) - _ = controllerutil.SetControllerReference(cr, svc, scheme) - return svc - } - - rr := reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: "nfs-server", - Namespace: "nfs-server", - }, - } - - type args struct { - req ctrl.Request - } - tests := []struct { - name string - args args - cr *nfsv1alpha1.NFSServer - want ctrl.Result - wantErr bool - }{ - { - name: "Reconcile NFS Server Should Set Initializing State when State is Empty", - args: args{ - req: rr, - }, - cr: newCustomResource(rr.NamespacedName).WithExports("share1", "ReadWrite", "none", "test-claim").Generate(), - want: reconcile.Result{Requeue: true}, - }, - { - name: "Reconcile NFS Server Shouldn't Requeue when State is Error", - args: args{ - req: rr, - }, - cr: newCustomResource(rr.NamespacedName).WithExports("share1", "ReadWrite", "none", "test-claim").WithState(nfsv1alpha1.StateError).Generate(), - want: reconcile.Result{Requeue: false}, - }, - { - name: "Reconcile NFS Server Should Error on Duplicate Export", - args: args{ - req: rr, - }, - cr: newCustomResource(rr.NamespacedName).WithExports("share1", "ReadWrite", "none", "test-claim").WithExports("share1", "ReadWrite", "none", "test-claim").WithState(nfsv1alpha1.StateInitializing).Generate(), - wantErr: true, - }, - { - name: "Reconcile NFS Server With Single Export", - args: args{ - req: rr, - }, - cr: newCustomResource(rr.NamespacedName).WithExports("share1", "ReadWrite", "none", "test-claim").WithState(nfsv1alpha1.StateInitializing).Generate(), - }, - { - name: "Reconcile NFS Server With Multiple Export", - args: args{ - req: rr, - }, - cr: newCustomResource(rr.NamespacedName).WithExports("share1", "ReadWrite", "none", "test-claim").WithExports("share2", "ReadOnly", "none", "another-test-claim").WithState(nfsv1alpha1.StateInitializing).Generate(), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - scheme := clientgoscheme.Scheme - 
scheme.AddKnownTypes(nfsv1alpha1.SchemeGroupVersion, tt.cr) - - expectedServer := expectedServerFunc(scheme, tt.cr) - expectedServerService := expectedServerServiceFunc(scheme, tt.cr) - - objs := []runtime.Object{ - tt.cr, - expectedServer, - expectedServerService, - } - - expectedServer.GetObjectKind().SetGroupVersionKind(appsv1.SchemeGroupVersion.WithKind("StatefulSet")) - expectedServerService.GetObjectKind().SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Service")) - - fc := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() - fr := record.NewFakeRecorder(2) - - r := &NFSServerReconciler{ - Context: clusterdContext, - Client: fc, - Scheme: scheme, - Log: logger, - Recorder: fr, - } - got, err := r.Reconcile(context.TODO(), tt.args.req) - if (err != nil) != tt.wantErr { - t.Errorf("NFSServerReconciler.Reconcile() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("NFSServerReconciler.Reconcile() = %v, want %v", got, tt.want) - } - - gotServer := &appsv1.StatefulSet{} - if err := fc.Get(context.Background(), tt.args.req.NamespacedName, gotServer); err != nil { - t.Errorf("NFSServerReconciler.Reconcile() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(gotServer, expectedServer) { - t.Errorf("NFSServerReconciler.Reconcile() = %v, want %v", gotServer, expectedServer) - } - - gotServerService := &corev1.Service{} - if err := fc.Get(context.Background(), tt.args.req.NamespacedName, gotServerService); err != nil { - t.Errorf("NFSServerReconciler.Reconcile() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(gotServerService, expectedServerService) { - t.Errorf("NFSServerReconciler.Reconcile() = %v, want %v", gotServerService, expectedServerService) - } - }) - } -} diff --git a/pkg/operator/nfs/operator.go b/pkg/operator/nfs/operator.go deleted file mode 100644 index 36c53408747d..000000000000 --- a/pkg/operator/nfs/operator.go +++ /dev/null @@ -1,78 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package nfs operator to manage NFS Server. -package nfs - -import ( - nfsv1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1" - - "github.com/coreos/pkg/capnslog" - "github.com/rook/rook/pkg/clusterd" - "k8s.io/apimachinery/pkg/runtime" - clientgoscheme "k8s.io/client-go/kubernetes/scheme" - ctrl "sigs.k8s.io/controller-runtime" -) - -var ( - scheme = runtime.NewScheme() - controllerName = "nfs-operator" - logger = capnslog.NewPackageLogger("github.com/rook/rook", controllerName) -) - -// Operator type for managing NFS Server. -type Operator struct { - context *clusterd.Context -} - -func init() { - _ = clientgoscheme.AddToScheme(scheme) - _ = nfsv1alpha1.AddToScheme(scheme) -} - -// New creates an operator instance. -func New(context *clusterd.Context) *Operator { - return &Operator{ - context: context, - } -} - -// Run the operator instance. 
-func (o *Operator) Run() error { - mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ - Scheme: scheme, - }) - if err != nil { - return err - } - - reconciler := &NFSServerReconciler{ - Client: mgr.GetClient(), - Context: o.context, - Log: logger, - Scheme: scheme, - Recorder: mgr.GetEventRecorderFor(controllerName), - } - - if err := ctrl.NewControllerManagedBy(mgr). - For(&nfsv1alpha1.NFSServer{}). - Complete(reconciler); err != nil { - return err - } - - logger.Info("starting manager") - return mgr.Start(ctrl.SetupSignalHandler()) -} diff --git a/pkg/operator/nfs/provisioner.go b/pkg/operator/nfs/provisioner.go deleted file mode 100644 index 5cf5b76df82a..000000000000 --- a/pkg/operator/nfs/provisioner.go +++ /dev/null @@ -1,283 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nfs - -import ( - "context" - "fmt" - "os" - "path" - "path/filepath" - "regexp" - "strconv" - "strings" - - "github.com/pkg/errors" - rookclient "github.com/rook/rook/pkg/client/clientset/versioned" - v1 "k8s.io/api/core/v1" - storagev1 "k8s.io/api/storage/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/component-helpers/storage/volume" - "sigs.k8s.io/sig-storage-lib-external-provisioner/v6/controller" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - nfsServerNameSCParam = "nfsServerName" - nfsServerNamespaceSCParam = "nfsServerNamespace" - exportNameSCParam = "exportName" - projectBlockAnnotationKey = "nfs.rook.io/project_block" -) - -var ( - mountPath = "/" -) - -type Provisioner struct { - client kubernetes.Interface - rookClient rookclient.Interface - quotaer Quotaer -} - -var _ controller.Provisioner = &Provisioner{} - -// NewNFSProvisioner returns an instance of nfsProvisioner -func NewNFSProvisioner(clientset kubernetes.Interface, rookClientset rookclient.Interface) (*Provisioner, error) { - quotaer, err := NewProjectQuota() - if err != nil { - return nil, err - } - - return &Provisioner{ - client: clientset, - rookClient: rookClientset, - quotaer: quotaer, - }, nil -} - -// Provision(context.Context, ProvisionOptions) (*v1.PersistentVolume, ProvisioningState, error) -func (p *Provisioner) Provision(ctx context.Context, options controller.ProvisionOptions) (*v1.PersistentVolume, controller.ProvisioningState, error) { - logger.Infof("nfs provisioner: ProvisionOptions %v", options) - annotations := make(map[string]string) - - if options.PVC.Spec.Selector != nil { - return nil, controller.ProvisioningFinished, fmt.Errorf("claim Selector is not supported") - } - - sc, err := p.storageClassForPVC(ctx, options.PVC) - if err != nil { - return nil, controller.ProvisioningFinished, err - } - - serverName, present := sc.Parameters[nfsServerNameSCParam] - if !present { - return nil, controller.ProvisioningFinished, errors.Errorf("NFS share Path not found in the storageclass: %v", sc.GetName()) - } - - serverNamespace, present := sc.Parameters[nfsServerNamespaceSCParam] - if !present { - return nil, controller.ProvisioningFinished, 
errors.Errorf("NFS share Path not found in the storageclass: %v", sc.GetName()) - } - - exportName, present := sc.Parameters[exportNameSCParam] - if !present { - return nil, controller.ProvisioningFinished, errors.Errorf("NFS share Path not found in the storageclass: %v", sc.GetName()) - } - - nfsserver, err := p.rookClient.NfsV1alpha1().NFSServers(serverNamespace).Get(ctx, serverName, metav1.GetOptions{}) - if err != nil { - return nil, controller.ProvisioningFinished, err - } - - nfsserversvc, err := p.client.CoreV1().Services(serverNamespace).Get(ctx, serverName, metav1.GetOptions{}) - if err != nil { - return nil, controller.ProvisioningFinished, err - } - - var ( - exportPath string - found bool - ) - - for _, export := range nfsserver.Spec.Exports { - if export.Name == exportName { - exportPath = path.Join(mountPath, export.PersistentVolumeClaim.ClaimName) - found = true - } - } - - if !found { - return nil, controller.ProvisioningFinished, fmt.Errorf("No export name from storageclass is match with NFSServer %s in namespace %s", nfsserver.Name, nfsserver.Namespace) - } - - pvName := strings.Join([]string{options.PVC.Namespace, options.PVC.Name, options.PVName}, "-") - fullPath := path.Join(exportPath, pvName) - if err := os.MkdirAll(fullPath, 0700); err != nil { - return nil, controller.ProvisioningFinished, errors.New("unable to create directory to provision new pv: " + err.Error()) - } - - capacity := options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] - block, err := p.createQuota(exportPath, fullPath, strconv.FormatInt(capacity.Value(), 10)) - if err != nil { - return nil, controller.ProvisioningFinished, err - } - - annotations[projectBlockAnnotationKey] = block - - pv := &v1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: options.PVName, - Annotations: annotations, - }, - Spec: v1.PersistentVolumeSpec{ - PersistentVolumeReclaimPolicy: *options.StorageClass.ReclaimPolicy, - AccessModes: options.PVC.Spec.AccessModes, - MountOptions: options.StorageClass.MountOptions, - Capacity: v1.ResourceList{ - v1.ResourceName(v1.ResourceStorage): capacity, - }, - PersistentVolumeSource: v1.PersistentVolumeSource{ - NFS: &v1.NFSVolumeSource{ - Server: nfsserversvc.Spec.ClusterIP, - Path: fullPath, - ReadOnly: false, - }, - }, - }, - } - - return pv, controller.ProvisioningFinished, nil -} - -func (p *Provisioner) Delete(ctx context.Context, volume *v1.PersistentVolume) error { - nfsPath := volume.Spec.PersistentVolumeSource.NFS.Path - pvName := path.Base(nfsPath) - - sc, err := p.storageClassForPV(ctx, volume) - if err != nil { - return err - } - - serverName, present := sc.Parameters[nfsServerNameSCParam] - if !present { - return errors.Errorf("NFS share Path not found in the storageclass: %v", sc.GetName()) - } - - serverNamespace, present := sc.Parameters[nfsServerNamespaceSCParam] - if !present { - return errors.Errorf("NFS share Path not found in the storageclass: %v", sc.GetName()) - } - - exportName, present := sc.Parameters[exportNameSCParam] - if !present { - return errors.Errorf("NFS share Path not found in the storageclass: %v", sc.GetName()) - } - - nfsserver, err := p.rookClient.NfsV1alpha1().NFSServers(serverNamespace).Get(ctx, serverName, metav1.GetOptions{}) - if err != nil { - return err - } - - var ( - exportPath string - found bool - ) - - for _, export := range nfsserver.Spec.Exports { - if export.Name == exportName { - exportPath = path.Join(mountPath, export.PersistentVolumeClaim.ClaimName) - found = true - } - } - - if !found { - return 
fmt.Errorf("No export name from storageclass is match with NFSServer %s in namespace %s", nfsserver.Name, nfsserver.Namespace) - } - - block, ok := volume.Annotations[projectBlockAnnotationKey] - if !ok { - return fmt.Errorf("PV doesn't have an annotation with key %s", projectBlockAnnotationKey) - } - - if err := p.removeQuota(exportPath, block); err != nil { - return err - } - - fullPath := path.Join(exportPath, pvName) - return os.RemoveAll(fullPath) -} - -func (p *Provisioner) createQuota(exportPath, directory string, limit string) (string, error) { - projectsFile := filepath.Join(exportPath, "projects") - if _, err := os.Stat(projectsFile); err != nil { - if os.IsNotExist(err) { - return "", nil - } - - return "", fmt.Errorf("error checking projects file in directory %s: %v", exportPath, err) - } - - return p.quotaer.CreateProjectQuota(projectsFile, directory, limit) -} - -func (p *Provisioner) removeQuota(exportPath, block string) error { - var projectID uint16 - projectsFile := filepath.Join(exportPath, "projects") - if _, err := os.Stat(projectsFile); err != nil { - if os.IsNotExist(err) { - return nil - } - - return fmt.Errorf("error checking projects file in directory %s: %v", exportPath, err) - } - - re := regexp.MustCompile("(?m:^([0-9]+):(.+):(.+)$)") - allMatches := re.FindAllStringSubmatch(block, -1) - for _, match := range allMatches { - digits := match[1] - if id, err := strconv.ParseUint(string(digits), 10, 16); err == nil { - projectID = uint16(id) - } - } - - return p.quotaer.RemoveProjectQuota(projectID, projectsFile, block) -} - -func (p *Provisioner) storageClassForPV(ctx context.Context, pv *v1.PersistentVolume) (*storagev1.StorageClass, error) { - if p.client == nil { - return nil, fmt.Errorf("Cannot get kube client") - } - className := volume.GetPersistentVolumeClass(pv) - if className == "" { - return nil, fmt.Errorf("Volume has no storage class") - } - - return p.client.StorageV1().StorageClasses().Get(ctx, className, metav1.GetOptions{}) -} - -func (p *Provisioner) storageClassForPVC(ctx context.Context, pvc *v1.PersistentVolumeClaim) (*storagev1.StorageClass, error) { - if p.client == nil { - return nil, fmt.Errorf("Cannot get kube client") - } - className := volume.GetPersistentVolumeClaimClass(pvc) - if className == "" { - return nil, fmt.Errorf("Volume has no storage class") - } - - return p.client.StorageV1().StorageClasses().Get(ctx, className, metav1.GetOptions{}) -} diff --git a/pkg/operator/nfs/provisioner_test.go b/pkg/operator/nfs/provisioner_test.go deleted file mode 100644 index 8c71f994fccc..000000000000 --- a/pkg/operator/nfs/provisioner_test.go +++ /dev/null @@ -1,243 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package nfs - -import ( - "context" - "os" - "reflect" - "testing" - - rookclient "github.com/rook/rook/pkg/client/clientset/versioned" - rookclientfake "github.com/rook/rook/pkg/client/clientset/versioned/fake" - corev1 "k8s.io/api/core/v1" - storagev1 "k8s.io/api/storage/v1" - apiresource "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes" - k8sclientfake "k8s.io/client-go/kubernetes/fake" - "sigs.k8s.io/sig-storage-lib-external-provisioner/v6/controller" -) - -func init() { - mountPath = "/tmp/test-rook-nfs" -} - -func newDummyStorageClass(name string, nfsServerNamespacedName types.NamespacedName, reclaimPolicy corev1.PersistentVolumeReclaimPolicy) *storagev1.StorageClass { - return &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Parameters: map[string]string{ - nfsServerNameSCParam: nfsServerNamespacedName.Name, - nfsServerNamespaceSCParam: nfsServerNamespacedName.Namespace, - exportNameSCParam: name, - }, - ReclaimPolicy: &reclaimPolicy, - } -} - -func newDummyPVC(name, namespace string, capacity apiresource.Quantity, storageClassName string) *corev1.PersistentVolumeClaim { - return &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: corev1.PersistentVolumeClaimSpec{ - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceName(corev1.ResourceStorage): capacity, - }, - }, - StorageClassName: &storageClassName, - }, - } -} - -func newDummyPV(name, scName, expectedPath string, expectedCapacity apiresource.Quantity, expectedReclaimPolicy corev1.PersistentVolumeReclaimPolicy) *corev1.PersistentVolume { - annotations := make(map[string]string) - annotations[projectBlockAnnotationKey] = "" - return &corev1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Annotations: annotations, - }, - Spec: corev1.PersistentVolumeSpec{ - PersistentVolumeReclaimPolicy: expectedReclaimPolicy, - Capacity: corev1.ResourceList{ - corev1.ResourceName(corev1.ResourceStorage): expectedCapacity, - }, - PersistentVolumeSource: corev1.PersistentVolumeSource{ - NFS: &corev1.NFSVolumeSource{ - Path: expectedPath, - }, - }, - StorageClassName: scName, - }, - } -} - -func TestProvisioner_Provision(t *testing.T) { - ctx := context.TODO() - if err := os.MkdirAll(mountPath, 0755); err != nil { - t.Error("error creating test provisioner directory") - } - - defer os.RemoveAll(mountPath) - - fakeQuoater, err := NewFakeProjectQuota() - if err != nil { - t.Error(err) - } - - nfsserver := newCustomResource(types.NamespacedName{Name: "test-nfsserver", Namespace: "test-nfsserver"}).WithExports("share-1", "ReadWrite", "none", "test-claim").Generate() - - type fields struct { - client kubernetes.Interface - rookClient rookclient.Interface - quoater Quotaer - } - type args struct { - options controller.ProvisionOptions - } - tests := []struct { - name string - fields fields - args args - want *corev1.PersistentVolume - wantErr bool - }{ - { - name: "success create volume", - fields: fields{ - client: k8sclientfake.NewSimpleClientset( - newServiceForNFSServer(nfsserver), - newDummyStorageClass("share-1", types.NamespacedName{Name: nfsserver.Name, Namespace: nfsserver.Namespace}, corev1.PersistentVolumeReclaimDelete), - ), - rookClient: rookclientfake.NewSimpleClientset( - nfsserver, - ), - quoater: fakeQuoater, - }, - args: args{ - options: controller.ProvisionOptions{ - StorageClass: 
newDummyStorageClass("share-1", types.NamespacedName{Name: nfsserver.Name, Namespace: nfsserver.Namespace}, corev1.PersistentVolumeReclaimDelete), - PVName: "share-1-pvc", - PVC: newDummyPVC("share-1-pvc", "default", apiresource.MustParse("1Mi"), "share-1"), - }, - }, - want: newDummyPV("share-1-pvc", "", "/tmp/test-rook-nfs/test-claim/default-share-1-pvc-share-1-pvc", apiresource.MustParse("1Mi"), corev1.PersistentVolumeReclaimDelete), - }, - { - name: "no matching export", - fields: fields{ - client: k8sclientfake.NewSimpleClientset( - newServiceForNFSServer(nfsserver), - newDummyStorageClass("foo", types.NamespacedName{Name: nfsserver.Name, Namespace: nfsserver.Namespace}, corev1.PersistentVolumeReclaimDelete), - ), - rookClient: rookclientfake.NewSimpleClientset( - nfsserver, - ), - }, - args: args{ - options: controller.ProvisionOptions{ - StorageClass: newDummyStorageClass("foo", types.NamespacedName{Name: nfsserver.Name, Namespace: nfsserver.Namespace}, corev1.PersistentVolumeReclaimDelete), - PVName: "share-1-pvc", - PVC: newDummyPVC("share-1-pvc", "default", apiresource.MustParse("1Mi"), "foo"), - }, - }, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - p := &Provisioner{ - client: tt.fields.client, - rookClient: tt.fields.rookClient, - quotaer: tt.fields.quoater, - } - got, _, err := p.Provision(ctx, tt.args.options) - if (err != nil) != tt.wantErr { - t.Errorf("Provisioner.Provision() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("Provisioner.Provision() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestProvisioner_Delete(t *testing.T) { - ctx := context.TODO() - if err := os.MkdirAll(mountPath, 0755); err != nil { - t.Error("error creating test provisioner directory") - } - - defer os.RemoveAll(mountPath) - - fakeQuoater, err := NewFakeProjectQuota() - if err != nil { - t.Error(err) - } - - nfsserver := newCustomResource(types.NamespacedName{Name: "test-nfsserver", Namespace: "test-nfsserver"}).WithExports("share-1", "ReadWrite", "none", "test-claim").Generate() - type fields struct { - client kubernetes.Interface - rookClient rookclient.Interface - quoater Quotaer - } - type args struct { - volume *corev1.PersistentVolume - } - tests := []struct { - name string - fields fields - args args - wantErr bool - }{ - { - name: "success delete volume", - fields: fields{ - client: k8sclientfake.NewSimpleClientset( - newServiceForNFSServer(nfsserver), - newDummyStorageClass("share-1", types.NamespacedName{Name: nfsserver.Name, Namespace: nfsserver.Namespace}, corev1.PersistentVolumeReclaimDelete), - ), - rookClient: rookclientfake.NewSimpleClientset( - nfsserver, - ), - quoater: fakeQuoater, - }, - args: args{ - volume: newDummyPV("share-1-pvc", "share-1", "/tmp/test-rook-nfs/test-claim/default-share-1-pvc-share-1-pvc", apiresource.MustParse("1Mi"), corev1.PersistentVolumeReclaimDelete), - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - p := &Provisioner{ - client: tt.fields.client, - rookClient: tt.fields.rookClient, - quotaer: tt.fields.quoater, - } - if err := p.Delete(ctx, tt.args.volume); (err != nil) != tt.wantErr { - t.Errorf("Provisioner.Delete() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} diff --git a/pkg/operator/nfs/quota.go b/pkg/operator/nfs/quota.go deleted file mode 100644 index 9881b4183bf4..000000000000 --- a/pkg/operator/nfs/quota.go +++ /dev/null @@ -1,264 +0,0 @@ -package nfs - -import ( - "fmt" - "io/ioutil" 
- "math" - "os" - "os/exec" - "path/filepath" - "regexp" - "strconv" - "strings" - "sync" - - "sigs.k8s.io/sig-storage-lib-external-provisioner/v6/mount" -) - -type Quotaer interface { - CreateProjectQuota(projectsFile, directory, limit string) (string, error) - RemoveProjectQuota(projectID uint16, projectsFile, block string) error - RestoreProjectQuota() error -} - -type Quota struct { - mutex *sync.Mutex - projectsIDs map[string]map[uint16]bool -} - -func NewProjectQuota() (Quotaer, error) { - projectsIDs := map[string]map[uint16]bool{} - mountEntries, err := findProjectQuotaMount() - if err != nil { - return nil, err - } - - for _, entry := range mountEntries { - exportName := filepath.Base(entry.Mountpoint) - projectsIDs[exportName] = map[uint16]bool{} - projectsFile := filepath.Join(entry.Mountpoint, "projects") - _, err := os.Stat(projectsFile) - if os.IsNotExist(err) { - logger.Infof("creating new project file %s", projectsFile) - file, cerr := os.Create(projectsFile) - if cerr != nil { - return nil, fmt.Errorf("error creating xfs projects file %s: %v", projectsFile, cerr) - } - - if err := file.Close(); err != nil { - return nil, err - } - } else { - logger.Infof("found project file %s, restoring project ids", projectsFile) - re := regexp.MustCompile("(?m:^([0-9]+):/.+$)") - projectIDs, err := restoreProjectIDs(projectsFile, re) - if err != nil { - logger.Errorf("error while populating projectIDs map, there may be errors setting quotas later if projectIDs are reused: %v", err) - } - - projectsIDs[exportName] = projectIDs - } - } - - quota := &Quota{ - mutex: &sync.Mutex{}, - projectsIDs: projectsIDs, - } - - if err := quota.RestoreProjectQuota(); err != nil { - return nil, err - } - - return quota, nil -} - -func findProjectQuotaMount() ([]*mount.Info, error) { - var entries []*mount.Info - allEntries, err := mount.GetMounts() - if err != nil { - return nil, err - } - - for _, entry := range allEntries { - // currently we only support xfs - if entry.Fstype != "xfs" { - continue - } - - if filepath.Dir(entry.Mountpoint) == mountPath && (strings.Contains(entry.VfsOpts, "pquota") || strings.Contains(entry.VfsOpts, "prjquota")) { - entries = append(entries, entry) - } - } - - return entries, nil -} - -func restoreProjectIDs(projectsFile string, re *regexp.Regexp) (map[uint16]bool, error) { - ids := map[uint16]bool{} - digitsRe := "([0-9]+)" - if !strings.Contains(re.String(), digitsRe) { - return ids, fmt.Errorf("regexp %s doesn't contain digits submatch %s", re.String(), digitsRe) - } - - read, err := ioutil.ReadFile(projectsFile) // #nosec - if err != nil { - return ids, err - } - - allMatches := re.FindAllSubmatch(read, -1) - for _, match := range allMatches { - digits := match[1] - if id, err := strconv.ParseUint(string(digits), 10, 16); err == nil { - ids[uint16(id)] = true - } - } - - return ids, nil -} - -func (q *Quota) CreateProjectQuota(projectsFile, directory, limit string) (string, error) { - exportName := filepath.Base(filepath.Dir(projectsFile)) - - q.mutex.Lock() - projectID := uint16(1) - for ; projectID < math.MaxUint16; projectID++ { - if _, ok := q.projectsIDs[exportName][projectID]; !ok { - break - } - } - - q.projectsIDs[exportName][projectID] = true - block := strconv.FormatUint(uint64(projectID), 10) + ":" + directory + ":" + limit + "\n" - file, err := os.OpenFile(projectsFile, os.O_APPEND|os.O_WRONLY, 0600) // #nosec - if err != nil { - q.mutex.Unlock() - return "", err - } - - defer func() { - if err := file.Close(); err != nil { - logger.Errorf("Error closing 
file: %s\n", err) - } - }() - - if _, err = file.WriteString(block); err != nil { - q.mutex.Unlock() - return "", err - } - - if err := file.Sync(); err != nil { - q.mutex.Unlock() - return "", err - } - - logger.Infof("set project to %s for directory %s with limit %s", projectsFile, directory, limit) - if err := q.setProject(projectID, projectsFile, directory); err != nil { - q.mutex.Unlock() - return "", err - } - - logger.Infof("set quota for project id %d with limit %s", projectID, limit) - if err := q.setQuota(projectID, projectsFile, directory, limit); err != nil { - q.mutex.Unlock() - _ = q.removeProject(projectID, projectsFile, block) - } - - q.mutex.Unlock() - return block, nil -} - -func (q *Quota) RemoveProjectQuota(projectID uint16, projectsFile, block string) error { - return q.removeProject(projectID, projectsFile, block) -} - -func (q *Quota) RestoreProjectQuota() error { - mountEntries, err := findProjectQuotaMount() - if err != nil { - return err - } - - for _, entry := range mountEntries { - projectsFile := filepath.Join(entry.Mountpoint, "projects") - if _, err := os.Stat(projectsFile); err != nil { - if os.IsNotExist(err) { - continue - } - - return err - } - read, err := ioutil.ReadFile(projectsFile) // #nosec - if err != nil { - return err - } - - re := regexp.MustCompile("(?m:^([0-9]+):(.+):(.+)$\n)") - matches := re.FindAllSubmatch(read, -1) - for _, match := range matches { - projectID, _ := strconv.ParseUint(string(match[1]), 10, 16) - directory := string(match[2]) - bhard := string(match[3]) - - if _, err := os.Stat(directory); os.IsNotExist(err) { - _ = q.removeProject(uint16(projectID), projectsFile, string(match[0])) - continue - } - - if err := q.setProject(uint16(projectID), projectsFile, directory); err != nil { - return err - } - - logger.Infof("restoring quotas from project file %s for project id %s", string(match[1]), projectsFile) - if err := q.setQuota(uint16(projectID), projectsFile, directory, bhard); err != nil { - return fmt.Errorf("error restoring quota for directory %s: %v", directory, err) - } - } - } - - return nil -} - -func (q *Quota) setProject(projectID uint16, projectsFile, directory string) error { - cmd := exec.Command("xfs_quota", "-x", "-c", fmt.Sprintf("project -s -p %s %s", directory, strconv.FormatUint(uint64(projectID), 10)), filepath.Dir(projectsFile)) // #nosec - out, err := cmd.CombinedOutput() - if err != nil { - return fmt.Errorf("xfs_quota failed with error: %v, output: %s", err, out) - } - - return nil -} - -func (q *Quota) setQuota(projectID uint16, projectsFile, directory, bhard string) error { - exportName := filepath.Base(filepath.Dir(projectsFile)) - if !q.projectsIDs[exportName][projectID] { - return fmt.Errorf("project with id %v has not been added", projectID) - } - - cmd := exec.Command("xfs_quota", "-x", "-c", fmt.Sprintf("limit -p bhard=%s %s", bhard, strconv.FormatUint(uint64(projectID), 10)), filepath.Dir(projectsFile)) // #nosec - out, err := cmd.CombinedOutput() - if err != nil { - return fmt.Errorf("xfs_quota failed with error: %v, output: %s", err, out) - } - - return nil -} - -func (q *Quota) removeProject(projectID uint16, projectsFile, block string) error { - exportName := filepath.Base(filepath.Dir(projectsFile)) - q.mutex.Lock() - delete(q.projectsIDs[exportName], projectID) - read, err := ioutil.ReadFile(projectsFile) // #nosec - if err != nil { - q.mutex.Unlock() - return err - } - - removed := strings.Replace(string(read), block, "", -1) - err = ioutil.WriteFile(projectsFile, []byte(removed), 0) - if 
err != nil { - q.mutex.Unlock() - return err - } - - q.mutex.Unlock() - return nil -} diff --git a/pkg/operator/nfs/quota_fake.go b/pkg/operator/nfs/quota_fake.go deleted file mode 100644 index fc9e2ebf9b04..000000000000 --- a/pkg/operator/nfs/quota_fake.go +++ /dev/null @@ -1,19 +0,0 @@ -package nfs - -type FakeQuota struct{} - -func NewFakeProjectQuota() (Quotaer, error) { - return &FakeQuota{}, nil -} - -func (q *FakeQuota) CreateProjectQuota(projectsFile, directory, limit string) (string, error) { - return "", nil -} - -func (q *FakeQuota) RemoveProjectQuota(projectID uint16, projectsFile, block string) error { - return nil -} - -func (q *FakeQuota) RestoreProjectQuota() error { - return nil -} diff --git a/pkg/operator/nfs/server.go b/pkg/operator/nfs/server.go deleted file mode 100644 index a61785c133bb..000000000000 --- a/pkg/operator/nfs/server.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Portion of this file is coming from https://github.com/kubernetes-incubator/external-storage/blob/master/nfs/pkg/server/server.go -package nfs - -import ( - "fmt" - "os/exec" - "syscall" -) - -const ( - ganeshaLog = "/dev/stdout" - ganeshaOptions = "NIV_INFO" -) - -// Setup sets up various prerequisites and settings for the server. 
If an error -// is encountered at any point it returns it instantly -func Setup(ganeshaConfig string) error { - // Start rpcbind if it is not started yet - cmd := exec.Command("rpcinfo", "127.0.0.1") - if err := cmd.Run(); err != nil { - cmd = exec.Command("rpcbind", "-w") - if out, err := cmd.CombinedOutput(); err != nil { - return fmt.Errorf("Starting rpcbind failed with error: %v, output: %s", err, out) - } - } - - cmd = exec.Command("rpc.statd") - if out, err := cmd.CombinedOutput(); err != nil { - return fmt.Errorf("rpc.statd failed with error: %v, output: %s", err, out) - } - - // Start dbus, needed for ganesha dynamic exports - cmd = exec.Command("dbus-daemon", "--system") - if out, err := cmd.CombinedOutput(); err != nil { - return fmt.Errorf("dbus-daemon failed with error: %v, output: %s", err, out) - } - - err := setRlimitNOFILE() - if err != nil { - logger.Warningf("Error setting RLIMIT_NOFILE, there may be \"Too many open files\" errors later: %v", err) - } - return nil -} - -// Run : run the NFS server in the foreground until it exits -// Ideally, it should never exit when run in foreground mode -// We force foreground to allow the provisioner process to restart -// the server if it crashes - daemonization prevents us from using Wait() -// for this purpose -func Run(ganeshaConfig string) error { - // Start ganesha.nfsd - logger.Infof("Running NFS server!") - // #nosec G204 Rook controls the input to the exec arguments - cmd := exec.Command("ganesha.nfsd", "-F", "-L", ganeshaLog, "-f", ganeshaConfig, "-N", ganeshaOptions) - if out, err := cmd.CombinedOutput(); err != nil { - return fmt.Errorf("ganesha.nfsd failed with error: %v, output: %s", err, out) - } - return nil -} - -func setRlimitNOFILE() error { - var rlimit syscall.Rlimit - err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit) - if err != nil { - return fmt.Errorf("error getting RLIMIT_NOFILE: %v", err) - } - logger.Infof("starting RLIMIT_NOFILE rlimit.Cur %d, rlimit.Max %d", rlimit.Cur, rlimit.Max) - rlimit.Max = 1024 * 1024 - rlimit.Cur = 1024 * 1024 - err = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rlimit) - if err != nil { - return err - } - err = syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit) - if err != nil { - return fmt.Errorf("error getting RLIMIT_NOFILE: %v", err) - } - logger.Infof("ending RLIMIT_NOFILE rlimit.Cur %d, rlimit.Max %d", rlimit.Cur, rlimit.Max) - return nil -} - -// Stop stops the NFS server. -func Stop() { - // /bin/dbus-send --system --dest=org.ganesha.nfsd --type=method_call /org/ganesha/nfsd/admin org.ganesha.nfsd.admin.shutdown -} diff --git a/pkg/operator/nfs/spec.go b/pkg/operator/nfs/spec.go deleted file mode 100644 index 1dc1d97ffc24..000000000000 --- a/pkg/operator/nfs/spec.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package nfs - -import ( - "context" - nfsv1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1" - "github.com/rook/rook/pkg/operator/k8sutil" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/client-go/kubernetes" -) - -func newLabels(cr *nfsv1alpha1.NFSServer) map[string]string { - return map[string]string{ - "app": cr.Name, - } -} - -func newConfigMapForNFSServer(cr *nfsv1alpha1.NFSServer) *corev1.ConfigMap { - return &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: cr.Name, - Namespace: cr.Namespace, - Labels: newLabels(cr), - }, - } -} - -func newServiceForNFSServer(cr *nfsv1alpha1.NFSServer) *corev1.Service { - return &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: cr.Name, - Namespace: cr.Namespace, - Labels: newLabels(cr), - }, - Spec: corev1.ServiceSpec{ - Selector: newLabels(cr), - Type: corev1.ServiceTypeClusterIP, - Ports: []corev1.ServicePort{ - { - Name: "nfs", - Port: int32(nfsPort), - TargetPort: intstr.FromInt(int(nfsPort)), - }, - { - Name: "rpc", - Port: int32(rpcPort), - TargetPort: intstr.FromInt(int(rpcPort)), - }, - }, - }, - } -} - -func newStatefulSetForNFSServer(cr *nfsv1alpha1.NFSServer, clientset kubernetes.Interface, ctx context.Context) (*appsv1.StatefulSet, error) { - pod, err := k8sutil.GetRunningPod(clientset) - if err != nil { - return nil, err - } - image, err := k8sutil.GetContainerImage(pod, "") - if err != nil { - return nil, err - } - - privileged := true - replicas := int32(cr.Spec.Replicas) - return &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: cr.Name, - Namespace: cr.Namespace, - Labels: newLabels(cr), - }, - Spec: appsv1.StatefulSetSpec{ - Replicas: &replicas, - ServiceName: cr.Name, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: cr.Name, - Namespace: cr.Namespace, - Labels: newLabels(cr), - }, - Spec: corev1.PodSpec{ - ServiceAccountName: "rook-nfs-server", - Containers: []corev1.Container{ - { - Name: "nfs-server", - Image: image, - Args: []string{"nfs", "server", "--ganeshaConfigPath=" + nfsConfigMapPath + "/" + cr.Name}, - Ports: []corev1.ContainerPort{ - { - Name: "nfs-port", - ContainerPort: int32(nfsPort), - }, - { - Name: "rpc-port", - ContainerPort: int32(rpcPort), - }, - }, - SecurityContext: &corev1.SecurityContext{ - Capabilities: &corev1.Capabilities{ - Add: []corev1.Capability{ - "SYS_ADMIN", - "DAC_READ_SEARCH", - }, - }, - }, - }, - { - Name: "nfs-provisioner", - Image: image, - Args: []string{"nfs", "provisioner", "--provisioner=" + "nfs.rook.io/" + cr.Name + "-provisioner"}, - TerminationMessagePath: "/dev/termination-log", - TerminationMessagePolicy: corev1.TerminationMessageReadFile, - SecurityContext: &corev1.SecurityContext{ - Privileged: &privileged, - }, - }, - }, - }, - }, - }, - }, nil -} diff --git a/pkg/operator/nfs/webhook.go b/pkg/operator/nfs/webhook.go deleted file mode 100644 index cc399e8227b4..000000000000 --- a/pkg/operator/nfs/webhook.go +++ /dev/null @@ -1,60 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nfs - -import ( - nfsv1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1" - - ctrl "sigs.k8s.io/controller-runtime" -) - -type Webhook struct { - Port int - CertDir string -} - -func NewWebhook(port int, certDir string) *Webhook { - return &Webhook{ - Port: port, - CertDir: certDir, - } -} - -func (w *Webhook) Run() error { - opts := ctrl.Options{ - Port: w.Port, - Scheme: scheme, - } - - if w.CertDir != "" { - opts.CertDir = w.CertDir - } - - mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), opts) - if err != nil { - return err - } - - if err := ctrl.NewWebhookManagedBy(mgr). - For(&nfsv1alpha1.NFSServer{}). - Complete(); err != nil { - return err - } - - logger.Info("starting webhook manager") - return mgr.Start(ctrl.SetupSignalHandler()) -} diff --git a/pkg/util/exec/exec.go b/pkg/util/exec/exec.go index 9e505eeb40dd..67913e1f512a 100644 --- a/pkg/util/exec/exec.go +++ b/pkg/util/exec/exec.go @@ -37,7 +37,7 @@ import ( ) var ( - CephCommandTimeout = 15 * time.Second + CephCommandsTimeout = 15 * time.Second ) // Executor is the main interface for all the exec commands @@ -336,18 +336,21 @@ func ExtractExitCode(err error) (int, error) { case *kexec.CodeExitError: return errType.ExitStatus(), nil + // have to check both *kexec.CodeExitError and kexec.CodeExitError because CodeExitError methods + // are not defined with pointer receivers; both pointer and non-pointers are valid `error`s. + case kexec.CodeExitError: + return errType.ExitStatus(), nil + case *kerrors.StatusError: return int(errType.ErrStatus.Code), nil default: logger.Debugf(err.Error()) - // This is ugly but I don't know why the type assertion does not work... - // Whatever I've tried I can see the type "exec.CodeExitError" but none of the "case" nor other attempts with "errors.As()" worked :( - // So I'm parsing the Error string until we have a solution + // This is ugly, but it's a decent backup just in case the error isn't a type above. if strings.Contains(err.Error(), "command terminated with exit code") { a := strings.SplitAfter(err.Error(), "command terminated with exit code") return strconv.Atoi(strings.TrimSpace(a[1])) } - return 0, errors.Errorf("error %#v is not an ExitError nor CodeExitError but is %v", err, reflect.TypeOf(err)) + return -1, errors.Errorf("error %#v is an unknown error type: %v", err, reflect.TypeOf(err)) } } diff --git a/pkg/util/exec/exec_pod.go b/pkg/util/exec/exec_pod.go index fa3c54179724..73b4b105ec30 100644 --- a/pkg/util/exec/exec_pod.go +++ b/pkg/util/exec/exec_pod.go @@ -131,5 +131,5 @@ func execute(method string, url *url.URL, config *rest.Config, stdin io.Reader, } func (e *RemotePodCommandExecutor) ExecCommandInContainerWithFullOutputWithTimeout(appLabel, containerName, namespace string, cmd ...string) (string, string, error) { - return e.ExecCommandInContainerWithFullOutput(appLabel, containerName, namespace, append([]string{"timeout", strconv.Itoa(int(CephCommandTimeout.Seconds()))}, cmd...)...) 
+ return e.ExecCommandInContainerWithFullOutput(appLabel, containerName, namespace, append([]string{"timeout", strconv.Itoa(int(CephCommandsTimeout.Seconds()))}, cmd...)...) } diff --git a/pkg/util/exec/exec_test.go b/pkg/util/exec/exec_test.go index dd948d0d4757..4bcc9f38d734 100644 --- a/pkg/util/exec/exec_test.go +++ b/pkg/util/exec/exec_test.go @@ -17,9 +17,14 @@ limitations under the License. package exec import ( - "errors" "os/exec" "testing" + + "github.com/pkg/errors" + exectest "github.com/rook/rook/pkg/util/exec/test" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kexec "k8s.io/utils/exec" ) func Test_assertErrorType(t *testing.T) { @@ -43,3 +48,65 @@ func Test_assertErrorType(t *testing.T) { }) } } + +// import TestMockExecHelperProcess +func TestMockExecHelperProcess(t *testing.T) { + exectest.TestMockExecHelperProcess(t) +} + +func TestExtractExitCode(t *testing.T) { + mockExecExitError := func(retcode int) *exec.ExitError { + // we can't create an exec.ExitError directly, but we can get one by running a command that fails + // use go's type assertion to be sure we are returning exactly *exec.ExitError + err := exectest.MockExecCommandReturns(t, "stdout", "stderr", retcode) + + ee, ok := err.(*exec.ExitError) + if !ok { + t.Fatalf("failed to create an *exec.ExitError. instead %T", err) + } + return ee + } + + expectError := true + noError := false + + tests := []struct { + name string + inputErr error + want int + wantErr bool + }{ + {"*exec.ExitError", + mockExecExitError(3), + 3, noError}, + /* {"exec.ExitError", // non-pointer case is impossible (won't compile) */ + {"*kexec.CodeExitError (pointer)", + &kexec.CodeExitError{Err: errors.New("some error"), Code: 4}, + 4, noError}, + {"kexec.CodeExitError (non-pointer)", + kexec.CodeExitError{Err: errors.New("some error"), Code: 5}, + 5, noError}, + {"*kerrors.StatusError", + &kerrors.StatusError{ErrStatus: metav1.Status{Code: 6}}, + 6, noError}, + /* {"kerrors.StatusError", // non-pointer case is impossible (won't compile) */ + {"unknown error type with error code extractable from error message", + errors.New("command terminated with exit code 7"), + 7, noError}, + {"unknown error type with no extractable error code", + errors.New("command with no extractable error code even with an int here: 8"), + -1, expectError}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := ExtractExitCode(tt.inputErr) + if (err != nil) != tt.wantErr { + t.Errorf("ExtractExitCode() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("ExtractExitCode() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/util/exec/test/mockexec.go b/pkg/util/exec/test/mockexec.go index f2d5d2989536..b7f1815ad3fc 100644 --- a/pkg/util/exec/test/mockexec.go +++ b/pkg/util/exec/test/mockexec.go @@ -17,7 +17,11 @@ limitations under the License. package test import ( + "fmt" + "os" "os/exec" + "strconv" + "testing" "time" ) @@ -76,3 +80,47 @@ func (e *MockExecutor) ExecuteCommandWithCombinedOutput(command string, arg ...s return "", nil } + +// Mock an executed command with the desired return values. +// STDERR is returned *before* STDOUT. +// +// This will return an error if the given exit code is nonzero. The error return is the primary +// benefit of using this method. 
+// +// In order for this to work in a `*_test.go` file, you MUST import TestMockExecHelperProcess +// exactly as shown below: +// import exectest "github.com/rook/rook/pkg/util/exec/test" +// // import TestMockExecHelperProcess +// func TestMockExecHelperProcess(t *testing.T) { +// exectest.TestMockExecHelperProcess(t) +// } +// Inspired by: https://github.com/golang/go/blob/master/src/os/exec/exec_test.go +func MockExecCommandReturns(t *testing.T, stdout, stderr string, retcode int) error { + cmd := exec.Command(os.Args[0], "-test.run=TestMockExecHelperProcess") //nolint:gosec //Rook controls the input to the exec arguments + cmd.Env = append(os.Environ(), + "GO_WANT_HELPER_PROCESS=1", + fmt.Sprintf("GO_HELPER_PROCESS_STDOUT=%s", stdout), + fmt.Sprintf("GO_HELPER_PROCESS_STDERR=%s", stderr), + fmt.Sprintf("GO_HELPER_PROCESS_RETCODE=%d", retcode), + ) + err := cmd.Run() + return err +} + +// TestHelperProcess isn't a real test. It's used as a helper process for MockExecCommandReturns to +// simulate output from a command. Notably, this can return a realistic os/exec error. +// Inspired by: https://github.com/golang/go/blob/master/src/os/exec/exec_test.go +func TestMockExecHelperProcess(t *testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { + return + } + + // test should set these in its environment to control the output of the test commands + fmt.Fprint(os.Stderr, os.Getenv("GO_HELPER_PROCESS_STDERR")) // return stderr before stdout + fmt.Fprint(os.Stdout, os.Getenv("GO_HELPER_PROCESS_STDOUT")) + rc, err := strconv.Atoi(os.Getenv("GO_HELPER_PROCESS_RETCODE")) + if err != nil { + panic(err) + } + os.Exit(rc) +} diff --git a/pkg/util/file.go b/pkg/util/file.go index 3e034b278efd..ac815b3f4bcf 100644 --- a/pkg/util/file.go +++ b/pkg/util/file.go @@ -25,6 +25,7 @@ import ( "runtime" "github.com/coreos/pkg/capnslog" + "github.com/pkg/errors" ) var logger = capnslog.NewPackageLogger("github.com/rook/rook", "util") @@ -60,3 +61,19 @@ func PathToProjectRoot() string { root := filepath.Dir(pkg) // return root } + +// CreateTempFile creates a temporary file with content passed as an argument +func CreateTempFile(content string) (*os.File, error) { + // Generate a temp file + file, err := ioutil.TempFile("", "") + if err != nil { + return nil, errors.Wrap(err, "failed to generate temp file") + } + + // Write content into file + err = ioutil.WriteFile(file.Name(), []byte(content), 0440) + if err != nil { + return nil, errors.Wrap(err, "failed to write content into file") + } + return file, nil +} diff --git a/pkg/util/retry.go b/pkg/util/retry.go index 3c15141ea944..27622e8052c3 100644 --- a/pkg/util/retry.go +++ b/pkg/util/retry.go @@ -36,7 +36,7 @@ func Retry(maxRetries int, delay time.Duration, f func() error) error { tries++ if tries > maxRetries { - return fmt.Errorf("max retries exceeded, last err: %+v", err) + return fmt.Errorf("max retries exceeded, last err: %v", err) } logger.Infof("retrying after %v, last error: %v", delay, err) diff --git a/pkg/util/set.go b/pkg/util/set.go deleted file mode 100644 index 2801b59fa005..000000000000 --- a/pkg/util/set.go +++ /dev/null @@ -1,169 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -type Set struct { - values map[string]struct{} -} - -// Create a new empty set -func NewSet() *Set { - set := &Set{} - set.values = make(map[string]struct{}) - return set -} - -// Create a new set from the array -func CreateSet(values []string) *Set { - set := &Set{} - set.values = make(map[string]struct{}) - for _, value := range values { - set.add(value) - } - return set -} - -// Create a copy of the set -func (s *Set) Copy() *Set { - set := NewSet() - for value := range s.values { - set.values[value] = struct{}{} - } - - return set -} - -// Subtract the subset from the set -func (s *Set) Subtract(subset *Set) { - // Iterate over each element in the set to see if it's in the subset - for value := range s.values { - if _, ok := subset.values[value]; ok { - delete(s.values, value) - } - } -} - -// Add a value to the set. Returns true if the value was added, false if it already exists. -func (s *Set) Add(newValue string) bool { - if _, ok := s.values[newValue]; !ok { - s.add(newValue) - return true - } - - // The value is already in the set - return false -} - -// Remove a value from the set. Returns true if the value was removed, false if it does not exist. -func (s *Set) Remove(oldValue string) bool { - if _, ok := s.values[oldValue]; ok { - delete(s.values, oldValue) - return true - } - - // The value is not in the set - return false -} - -// Add the value to the set -func (s *Set) add(value string) { - s.values[value] = struct{}{} -} - -// Check whether a value is already contained in the set -func (s *Set) Contains(value string) bool { - _, ok := s.values[value] - return ok -} - -// Iterate over the items in the set -func (s *Set) Iter() <-chan string { - channel := make(chan string) - go func() { - for value := range s.values { - channel <- value - } - close(channel) - }() - return channel -} - -// Get the count of items in the set -func (s *Set) Count() int { - return len(s.values) -} - -// Add other set items -func (s *Set) AddSet(other *Set) { - for value := range other.Iter() { - s.add(value) - } -} - -// Add multiple items more efficiently -func (s *Set) AddMultiple(values []string) { - for _, value := range values { - s.add(value) - } -} - -// Check if two sets contain the same elements -func (s *Set) Equals(other *Set) bool { - if s.Count() != other.Count() { - return false - } - - for value := range s.Iter() { - if !other.Contains(value) { - return false - } - } - - return true -} - -// Convert the set to an array -func (s *Set) ToSlice() []string { - values := []string{} - for value := range s.values { - values = append(values, value) - } - - return values -} - -// find items in the left slice that are not in the right slice -func SetDifference(left, right []string) *Set { - result := NewSet() - for _, leftItem := range left { - foundItem := false - - // search for the left item in the right set - for _, rightItem := range right { - if leftItem == rightItem { - foundItem = true - break - } - } - - if !foundItem { - result.Add(leftItem) - } - } - - return result -} diff --git a/pkg/util/set_test.go b/pkg/util/set_test.go deleted file mode 100644 index 
3f4323743884..000000000000 --- a/pkg/util/set_test.go +++ /dev/null @@ -1,117 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestSubtract(t *testing.T) { - set := CreateSet([]string{"a", "b", "x", "y", "z"}) - subset := CreateSet([]string{"b", "z"}) - set.Subtract(subset) - assert.Equal(t, 3, set.Count()) - assert.True(t, set.Contains("a")) - assert.False(t, set.Contains("b")) - assert.True(t, set.Contains("x")) - assert.True(t, set.Contains("y")) - assert.False(t, set.Contains("z")) -} - -func TestSubtractEmptySet(t *testing.T) { - // Both sets empty - set := NewSet() - subset := NewSet() - set.Subtract(subset) - assert.Equal(t, 0, set.Count()) - - // Subset is empty - set = CreateSet([]string{"1", "2"}) - set.Subtract(subset) - assert.Equal(t, 2, set.Count()) -} - -func TestAddSingle(t *testing.T) { - set := NewSet() - assert.True(t, set.Add("foo")) - assert.False(t, set.Add("foo")) - - assert.Equal(t, 1, set.Count()) - assert.True(t, set.Contains("foo")) - assert.False(t, set.Contains("bar")) - - assert.True(t, set.Add("bar")) - assert.Equal(t, 2, set.Count()) - assert.True(t, set.Contains("foo")) - assert.True(t, set.Contains("bar")) - assert.False(t, set.Contains("baz")) -} - -func TestAddMultiple(t *testing.T) { - set := NewSet() - set.AddMultiple([]string{"a", "b", "z"}) - assert.Equal(t, 3, set.Count()) - assert.True(t, set.Contains("a")) - assert.True(t, set.Contains("b")) - assert.False(t, set.Contains("c")) - assert.True(t, set.Contains("z")) -} - -func TestToSlice(t *testing.T) { - set := CreateSet([]string{"1", "2", "3"}) - arr := set.ToSlice() - assert.Equal(t, 3, len(arr)) - - // Empty set - set = CreateSet([]string{}) - setSlice := set.ToSlice() - assert.NotNil(t, setSlice) - assert.Equal(t, 0, len(setSlice)) -} - -func TestCopy(t *testing.T) { - set := CreateSet([]string{"x", "y", "z"}) - copySet := set.Copy() - assert.Equal(t, 3, copySet.Count()) - assert.True(t, copySet.Contains("x")) - assert.True(t, copySet.Contains("y")) - assert.True(t, copySet.Contains("z")) - assert.False(t, copySet.Contains("a")) -} - -func TestIter(t *testing.T) { - set := CreateSet([]string{"a", "b", "c", "x", "y", "z"}) - count := 0 - for range set.Iter() { - count++ - } - assert.Equal(t, 6, count) -} - -func TestSetEquals(t *testing.T) { - set := CreateSet([]string{"a", "b"}) - assert.True(t, set.Equals(CreateSet([]string{"a", "b"}))) - assert.False(t, set.Equals(CreateSet([]string{"a", "b", "c"}))) - assert.False(t, set.Equals(CreateSet([]string{"a"}))) - assert.False(t, set.Equals(CreateSet([]string{"a", "x"}))) - - set = CreateSet([]string{}) - assert.True(t, set.Equals(CreateSet([]string{}))) - assert.False(t, set.Equals(CreateSet([]string{"a"}))) -} diff --git a/pkg/util/sys/device.go b/pkg/util/sys/device.go index 727bf6187716..5cbc2da11d95 100644 --- a/pkg/util/sys/device.go +++ b/pkg/util/sys/device.go @@ -209,18 +209,7 @@ func 
GetDevicePropertiesFromPath(devicePath string, executor exec.Executor) (map output, err := executor.ExecuteCommandWithOutput("lsblk", devicePath, "--bytes", "--nodeps", "--pairs", "--paths", "--output", "SIZE,ROTA,RO,TYPE,PKNAME,NAME,KNAME") if err != nil { - // The "not a block device" error also returns code 32 so the ExitStatus() check hides this error - if strings.Contains(output, "not a block device") { - return nil, err - } - - // try to get more information about the command error - if code, ok := exec.ExitStatus(err); ok && code == 32 { - // certain device types (such as loop) return exit status 32 when probed further, - // ignore and continue without logging - return map[string]string{}, nil - } - + logger.Errorf("failed to execute lsblk. output: %s", output) return nil, err } @@ -418,13 +407,13 @@ func inventoryDevice(executor exec.Executor, devicePath string) (CephVolumeInven args := []string{"inventory", "--format", "json", devicePath} inventory, err := executor.ExecuteCommandWithOutput("ceph-volume", args...) if err != nil { - return CVInventory, fmt.Errorf("failed to execute ceph-volume inventory on disk %q. %v", devicePath, err) + return CVInventory, fmt.Errorf("failed to execute ceph-volume inventory on disk %q. %s. %v", devicePath, inventory, err) } bInventory := []byte(inventory) err = json.Unmarshal(bInventory, &CVInventory) if err != nil { - return CVInventory, fmt.Errorf("error unmarshalling json data coming from ceph-volume inventory %q. %v", devicePath, err) + return CVInventory, fmt.Errorf("failed to unmarshal json data coming from ceph-volume inventory %q. %q. %v", devicePath, inventory, err) } return CVInventory, nil diff --git a/tests/framework/clients/bucket.go b/tests/framework/clients/bucket.go index 70e73f8b5f9d..8f3edeb86959 100644 --- a/tests/framework/clients/bucket.go +++ b/tests/framework/clients/bucket.go @@ -53,6 +53,10 @@ func (b *BucketOperation) DeleteObc(obcName string, storageClassName string, buc return b.k8sh.ResourceOperation("delete", b.manifests.GetOBC(obcName, storageClassName, bucketName, maxObject, createBucket)) } +func (b *BucketOperation) UpdateObc(obcName string, storageClassName string, bucketName string, maxObject string, createBucket bool) error { + return b.k8sh.ResourceOperation("apply", b.manifests.GetOBC(obcName, storageClassName, bucketName, maxObject, createBucket)) +} + // CheckOBC, returns true if the obc, secret and configmap are all in the "check" state, // and returns false if any of these resources are not in the "check" state. 
// Check state values: @@ -123,3 +127,10 @@ func (b *BucketOperation) GetSecretKey(obcName string) (string, error) { return string(decode), nil } + +// Checks whether MaxObject is updated for ob +func (b *BucketOperation) CheckOBMaxObject(obcName, maxobject string) bool { + obName, _ := b.k8sh.GetResource("obc", obcName, "--output", "jsonpath={.spec.objectBucketName}") + fetchMaxObject, _ := b.k8sh.GetResource("ob", obName, "--output", "jsonpath={.spec.endpoint.additionalConfig.maxObjects}") + return maxobject == fetchMaxObject +} diff --git a/tests/framework/clients/object_user.go b/tests/framework/clients/object_user.go index 6849624efc16..5ca3052b9ab1 100644 --- a/tests/framework/clients/object_user.go +++ b/tests/framework/clients/object_user.go @@ -74,10 +74,10 @@ func (o *ObjectUserOperation) UserSecretExists(namespace string, store string, u } // ObjectUserCreate Function to create a object store user in rook -func (o *ObjectUserOperation) Create(namespace string, userid string, displayName string, store string) error { +func (o *ObjectUserOperation) Create(userid, displayName, store, usercaps, maxsize string, maxbuckets, maxobjects int) error { logger.Infof("creating the object store user via CRD") - if err := o.k8sh.ResourceOperation("apply", o.manifests.GetObjectStoreUser(userid, displayName, store)); err != nil { + if err := o.k8sh.ResourceOperation("apply", o.manifests.GetObjectStoreUser(userid, displayName, store, usercaps, maxsize, maxbuckets, maxobjects)); err != nil { return err } return nil diff --git a/tests/framework/installer/cassandra_installer.go b/tests/framework/installer/cassandra_installer.go deleted file mode 100644 index a18216a2f011..000000000000 --- a/tests/framework/installer/cassandra_installer.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package installer - -import ( - "context" - "fmt" - "testing" - - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - "github.com/rook/rook/tests/framework/utils" - "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - cassandraCRD = "clusters.cassandra.rook.io" -) - -type CassandraInstaller struct { - k8sHelper *utils.K8sHelper - manifests *CassandraManifests - T func() *testing.T -} - -func NewCassandraInstaller(k8sHelper *utils.K8sHelper, t func() *testing.T) *CassandraInstaller { - return &CassandraInstaller{k8sHelper, &CassandraManifests{}, t} -} - -func (ci *CassandraInstaller) InstallCassandra(systemNamespace, namespace string, count int, mode cassandrav1alpha1.ClusterMode) error { - - ci.k8sHelper.CreateAnonSystemClusterBinding() - - // Check if a default storage class exists - defaultExists, err := ci.k8sHelper.IsDefaultStorageClassPresent() - if err != nil { - return err - } - if !defaultExists { - if err := CreateHostPathPVs(ci.k8sHelper, 3, true, "5Gi"); err != nil { - return err - } - } else { - logger.Info("skipping install of host path provisioner because a default storage class already exists") - } - - // Install cassandra operator - if err := ci.CreateCassandraOperator(systemNamespace); err != nil { - return err - } - // Create a Cassandra Cluster instance - if err := ci.CreateCassandraCluster(namespace, count, mode); err != nil { - return err - } - return nil -} - -func (ci *CassandraInstaller) CreateCassandraOperator(namespace string) error { - - logger.Info("Starting cassandra operator") - - logger.Info("Creating Cassandra CRD...") - if _, err := ci.k8sHelper.KubectlWithStdin(ci.manifests.GetCassandraCRDs(), createFromStdinArgs...); err != nil { - return err - } - - cassandraOperator := ci.manifests.GetCassandraOperator(namespace) - if _, err := ci.k8sHelper.KubectlWithStdin(cassandraOperator, createFromStdinArgs...); err != nil { - return fmt.Errorf("Failed to create rook-cassandra-operator pod: %+v", err) - } - - if !ci.k8sHelper.IsCRDPresent(cassandraCRD) { - return fmt.Errorf("Failed to find cassandra CRD %s", cassandraCRD) - } - - if !ci.k8sHelper.IsPodInExpectedState("rook-cassandra-operator", namespace, "Running") { - return fmt.Errorf("rook-cassandra-operator is not running, aborting") - } - - logger.Infof("cassandra operator started") - return nil - -} - -func (ci *CassandraInstaller) CreateCassandraCluster(namespace string, count int, mode cassandrav1alpha1.ClusterMode) error { - - // if err := ci.k8sHelper.CreateNamespace(namespace); err != nil { - // return err - // } - - logger.Info("Starting Cassandra Cluster with kubectl and yaml") - cassandraCluster := ci.manifests.GetCassandraCluster(namespace, count, mode) - if _, err := ci.k8sHelper.KubectlWithStdin(cassandraCluster, createFromStdinArgs...); err != nil { - return fmt.Errorf("Failed to create Cassandra Cluster: %s", err.Error()) - } - - if err := ci.k8sHelper.WaitForPodCount("app=rook-cassandra", namespace, count); err != nil { - return fmt.Errorf("Cassandra Cluster pods in namespace %s not found: %s", namespace, err.Error()) - } - - if err := ci.k8sHelper.WaitForLabeledPodsToRun("app=rook-cassandra", namespace); err != nil { - return fmt.Errorf("Cassandra Cluster Pods in namespace %s are not running: %s", namespace, err.Error()) - } - - logger.Infof("Cassandra Cluster started") - return nil -} - -func (ci *CassandraInstaller) DeleteCassandraCluster(namespace string) { - ctx := context.TODO() - // Delete Cassandra Cluster 
- logger.Infof("Uninstalling Cassandra from namespace %s", namespace) - err := ci.k8sHelper.DeleteResourceAndWait(true, "-n", namespace, cassandraCRD, namespace) - checkError(ci.T(), err, fmt.Sprintf("cannot remove cluster %s", namespace)) - - crdCheckerFunc := func() error { - _, err := ci.k8sHelper.RookClientset.CassandraV1alpha1().Clusters(namespace).Get(ctx, namespace, metav1.GetOptions{}) - return err - } - err = ci.k8sHelper.WaitForCustomResourceDeletion(namespace, namespace, crdCheckerFunc) - assert.NoError(ci.T(), err) - - // Delete Namespace - logger.Infof("Deleting Cassandra Cluster namespace %s", namespace) - err = ci.k8sHelper.DeleteResourceAndWait(true, "namespace", namespace) - checkError(ci.T(), err, fmt.Sprintf("cannot delete namespace %s", namespace)) -} - -func (ci *CassandraInstaller) UninstallCassandra(systemNamespace string, namespace string) { - ctx := context.TODO() - // Delete deployed Cluster - // ci.DeleteCassandraCluster(namespace) - cassandraCluster := ci.manifests.GetCassandraCluster(namespace, 0, "") - _, err := ci.k8sHelper.KubectlWithStdin(cassandraCluster, deleteFromStdinArgs...) - checkError(ci.T(), err, "cannot uninstall cluster") - - // Delete Operator, CRD and RBAC related to them - cassandraOperator := ci.manifests.GetCassandraOperator(systemNamespace) - _, err = ci.k8sHelper.KubectlWithStdin(cassandraOperator, deleteFromStdinArgs...) - checkError(ci.T(), err, "cannot uninstall rook-cassandra-operator") - - cassandraCRDs := ci.manifests.GetCassandraCRDs() - _, err = ci.k8sHelper.KubectlWithStdin(cassandraCRDs, deleteFromStdinArgs...) - checkError(ci.T(), err, "cannot uninstall cassandra CRDs") - - //Remove "anon-user-access" - logger.Info("Removing anon-user-access ClusterRoleBinding") - err = ci.k8sHelper.Clientset.RbacV1().ClusterRoleBindings().Delete(ctx, "anon-user-access", metav1.DeleteOptions{}) - assert.NoError(ci.T(), err) - logger.Info("Successfully deleted all cassandra operator related objects.") -} - -func (ci *CassandraInstaller) GatherAllCassandraLogs(systemNamespace, namespace, testName string) { - if !ci.T().Failed() && TestLogCollectionLevel() != "all" { - return - } - logger.Infof("Gathering all logs from Cassandra Cluster %s", namespace) - ci.k8sHelper.GetLogsFromNamespace(systemNamespace, testName, utils.TestEnvName()) - ci.k8sHelper.GetLogsFromNamespace(namespace, testName, utils.TestEnvName()) -} diff --git a/tests/framework/installer/cassandra_manifests.go b/tests/framework/installer/cassandra_manifests.go deleted file mode 100644 index 413c0a23db6d..000000000000 --- a/tests/framework/installer/cassandra_manifests.go +++ /dev/null @@ -1,144 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package installer - -import ( - "fmt" - "strings" - - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" -) - -type CassandraManifests struct{} - -func (i *CassandraManifests) GetCassandraCRDs() string { - manifest := readManifest("cassandra", "crds.yaml") - return manifest -} - -func (i *CassandraManifests) GetCassandraOperator(namespace string) string { - manifest := readManifest("cassandra", "operator.yaml") - manifest = strings.ReplaceAll(manifest, "rook-cassandra-system # namespace:operator", namespace) - - return manifest -} - -func (i *CassandraManifests) GetCassandraCluster(namespace string, count int, mode cassandrav1alpha1.ClusterMode) string { - - var version string - if mode == cassandrav1alpha1.ClusterModeScylla { - version = "2.3.0" - } else { - version = "3.11.6" - } - return fmt.Sprintf(` -# Namespace for cassandra cluster -apiVersion: v1 -kind: Namespace -metadata: - name: %[1]s - ---- - -# Role for cassandra members. -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: %[1]s-member - namespace: %[1]s -rules: - - apiGroups: - - "" - resources: - - pods - verbs: - - get - - apiGroups: - - "" - resources: - - services - verbs: - - get - - list - - patch - - watch - - apiGroups: - - cassandra.rook.io - resources: - - clusters - verbs: - - get - ---- - -# ServiceAccount for cassandra members. -apiVersion: v1 -kind: ServiceAccount -metadata: - name: %[1]s-member - namespace: %[1]s - ---- - -# RoleBinding for cassandra members. -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: %[1]s-member - namespace: %[1]s -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: %[1]s-member -subjects: -- kind: ServiceAccount - name: %[1]s-member - namespace: %[1]s - ---- - -# Cassandra Cluster -apiVersion: cassandra.rook.io/v1alpha1 -kind: Cluster -metadata: - name: %[1]s - namespace: %[1]s -spec: - version: %[4]s - mode: %[3]s - datacenter: - name: "us-east-1" - racks: - - name: "us-east-1a" - members: %[2]d - storage: - volumeClaimTemplates: - - metadata: - name: %[1]s-data - spec: - resources: - requests: - storage: 5Gi - resources: - requests: - cpu: 1 - memory: 2Gi - limits: - cpu: 1 - memory: 2Gi -`, namespace, count, mode, version) -} diff --git a/tests/framework/installer/ceph_helm_installer.go b/tests/framework/installer/ceph_helm_installer.go index e6c6cd6f4fef..6e9c753793af 100644 --- a/tests/framework/installer/ceph_helm_installer.go +++ b/tests/framework/installer/ceph_helm_installer.go @@ -17,8 +17,13 @@ limitations under the License. 
package installer import ( + "context" + "fmt" + "time" + "github.com/pkg/errors" "gopkg.in/yaml.v2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const ( @@ -26,6 +31,16 @@ const ( CephClusterChartName = "rook-ceph-cluster" ) +// The Ceph Storage CustomResource and StorageClass names used in testing +const ( + blockPoolName = "ceph-block-test" + blockPoolSCName = "ceph-block-test-sc" + filesystemName = "ceph-filesystem-test" + filesystemSCName = "ceph-filesystem-test-sc" + objectStoreName = "ceph-objectstore-test" + objectStoreSCName = "ceph-bucket-test-sc" +) + // CreateRookOperatorViaHelm creates rook operator via Helm chart named local/rook present in local repo func (h *CephInstaller) CreateRookOperatorViaHelm(values map[string]interface{}) error { // create the operator namespace before the admission controller is created @@ -59,10 +74,20 @@ func (h *CephInstaller) CreateRookCephClusterViaHelm(values map[string]interface values["configOverride"] = clusterCustomSettings values["toolbox"] = map[string]interface{}{ "enabled": true, - "image": "rook/ceph:master", + "image": "rook/ceph:" + LocalBuildTag, } values["cephClusterSpec"] = clusterCRD["spec"] + if err := h.CreateBlockPoolConfiguration(values, blockPoolName, blockPoolSCName); err != nil { + return err + } + if err := h.CreateFileSystemConfiguration(values, filesystemName, filesystemSCName); err != nil { + return err + } + if err := h.CreateObjectStoreConfiguration(values, objectStoreName, objectStoreSCName); err != nil { + return err + } + logger.Infof("Creating ceph cluster using Helm with values: %+v", values) if err := h.helmHelper.InstallLocalRookHelmChart(h.settings.Namespace, CephClusterChartName, values); err != nil { return err @@ -70,3 +95,151 @@ func (h *CephInstaller) CreateRookCephClusterViaHelm(values map[string]interface return nil } + +// RemoveRookCephClusterHelmDefaultCustomResources tidies up the helm created CRs and Storage Classes, as they interfere with other tests. +func (h *CephInstaller) RemoveRookCephClusterHelmDefaultCustomResources() error { + if err := h.k8shelper.Clientset.StorageV1().StorageClasses().Delete(context.TODO(), blockPoolSCName, v1.DeleteOptions{}); err != nil { + return err + } + if err := h.k8shelper.Clientset.StorageV1().StorageClasses().Delete(context.TODO(), filesystemSCName, v1.DeleteOptions{}); err != nil { + return err + } + if err := h.k8shelper.Clientset.StorageV1().StorageClasses().Delete(context.TODO(), objectStoreSCName, v1.DeleteOptions{}); err != nil { + return err + } + if err := h.k8shelper.RookClientset.CephV1().CephBlockPools(h.settings.Namespace).Delete(context.TODO(), blockPoolName, v1.DeleteOptions{}); err != nil { + return err + } + if err := h.k8shelper.RookClientset.CephV1().CephFilesystems(h.settings.Namespace).Delete(context.TODO(), filesystemName, v1.DeleteOptions{}); err != nil { + return err + } + if err := h.k8shelper.RookClientset.CephV1().CephObjectStores(h.settings.Namespace).Delete(context.TODO(), objectStoreName, v1.DeleteOptions{}); err != nil { + return err + } + if !h.k8shelper.WaitUntilPodWithLabelDeleted(fmt.Sprintf("rook_object_store=%s", objectStoreName), h.settings.Namespace) { + return fmt.Errorf("rgw did not stop via crd") + } + return nil +} + +// ConfirmHelmClusterInstalledCorrectly runs some validation to check whether the helm chart installed correctly. 
+func (h *CephInstaller) ConfirmHelmClusterInstalledCorrectly() error { + storageClassList, err := h.k8shelper.Clientset.StorageV1().StorageClasses().List(context.TODO(), v1.ListOptions{}) + if err != nil { + return err + } + + foundStorageClasses := 0 + for _, storageClass := range storageClassList.Items { + if storageClass.Name == blockPoolSCName { + foundStorageClasses++ + } else if storageClass.Name == filesystemSCName { + foundStorageClasses++ + } else if storageClass.Name == objectStoreSCName { + foundStorageClasses++ + } + } + if foundStorageClasses != 3 { + return fmt.Errorf("did not find the three storage classes which should have been deployed") + } + + // check that ObjectStore is created + logger.Infof("Check that RGW pods are Running") + for i := 0; i < 24 && !h.k8shelper.CheckPodCountAndState("rook-ceph-rgw", h.settings.Namespace, 2, "Running"); i++ { + logger.Infof("(%d) RGW pod check sleeping for 5 seconds ...", i) + time.Sleep(5 * time.Second) + } + if !h.k8shelper.CheckPodCountAndState("rook-ceph-rgw", h.settings.Namespace, 2, "Running") { + return fmt.Errorf("did not find the rados gateway pod, which should have been deployed") + } + return nil +} + +// CreateBlockPoolConfiguration creates a block store configuration +func (h *CephInstaller) CreateBlockPoolConfiguration(values map[string]interface{}, name, scName string) error { + testBlockPoolBytes := []byte(h.Manifests.GetBlockPool("testPool", "1")) + var testBlockPoolCRD map[string]interface{} + if err := yaml.Unmarshal(testBlockPoolBytes, &testBlockPoolCRD); err != nil { + return err + } + + storageClassBytes := []byte(h.Manifests.GetBlockStorageClass(name, scName, "Delete")) + var testBlockSC map[string]interface{} + if err := yaml.Unmarshal(storageClassBytes, &testBlockSC); err != nil { + return err + } + + values["cephBlockPools"] = []map[string]interface{}{ + { + "name": name, + "spec": testBlockPoolCRD["spec"], + "storageClass": map[string]interface{}{ + "enabled": true, + "isDefault": true, + "name": scName, + "parameters": testBlockSC["parameters"], + "reclaimPolicy": "Delete", + "allowVolumeExpansion": true, + }, + }, + } + return nil +} + +// CreateFileSystemConfiguration creates a filesystem configuration +func (h *CephInstaller) CreateFileSystemConfiguration(values map[string]interface{}, name, scName string) error { + testFilesystemBytes := []byte(h.Manifests.GetFilesystem("testFilesystem", 1)) + var testFilesystemCRD map[string]interface{} + if err := yaml.Unmarshal(testFilesystemBytes, &testFilesystemCRD); err != nil { + return err + } + + storageClassBytes := []byte(h.Manifests.GetFileStorageClass(name, scName)) + var testFileSystemSC map[string]interface{} + if err := yaml.Unmarshal(storageClassBytes, &testFileSystemSC); err != nil { + return err + } + + values["cephFileSystems"] = []map[string]interface{}{ + { + "name": name, + "spec": testFilesystemCRD["spec"], + "storageClass": map[string]interface{}{ + "enabled": true, + "name": scName, + "parameters": testFileSystemSC["parameters"], + "reclaimPolicy": "Delete", + }, + }, + } + return nil +} + +// CreateObjectStoreConfiguration creates an object store configuration +func (h *CephInstaller) CreateObjectStoreConfiguration(values map[string]interface{}, name, scName string) error { + testObjectStoreBytes := []byte(h.Manifests.GetObjectStore(name, 2, 8080, false)) + var testObjectStoreCRD map[string]interface{} + if err := yaml.Unmarshal(testObjectStoreBytes, &testObjectStoreCRD); err != nil { + return err + } + + storageClassBytes := 
[]byte(h.Manifests.GetBucketStorageClass(name, scName, "Delete", "us-east-1")) + var testObjectStoreSC map[string]interface{} + if err := yaml.Unmarshal(storageClassBytes, &testObjectStoreSC); err != nil { + return err + } + + values["cephObjectStores"] = []map[string]interface{}{ + { + "name": name, + "spec": testObjectStoreCRD["spec"], + "storageClass": map[string]interface{}{ + "enabled": true, + "name": scName, + "parameters": testObjectStoreSC["parameters"], + "reclaimPolicy": "Delete", + }, + }, + } + return nil +} diff --git a/tests/framework/installer/ceph_installer.go b/tests/framework/installer/ceph_installer.go index 69556095e790..12b40c44a21f 100644 --- a/tests/framework/installer/ceph_installer.go +++ b/tests/framework/installer/ceph_installer.go @@ -50,7 +50,7 @@ const ( // test with the latest octopus build octopusTestImage = "quay.io/ceph/ceph:v15" // test with the latest pacific build - pacificTestImage = "quay.io/ceph/ceph:v16" + pacificTestImage = "quay.io/ceph/ceph:v16.2.6" // test with the latest master image masterTestImage = "ceph/daemon-base:latest-master-devel" cephOperatorLabel = "app=rook-ceph-operator" @@ -61,14 +61,18 @@ const ( osd_pool_default_size = 1 bdev_flock_retry = 20 ` + volumeReplicationVersion = "v0.1.0" ) var ( - NautilusVersion = cephv1.CephVersionSpec{Image: nautilusTestImage} - NautilusPartitionVersion = cephv1.CephVersionSpec{Image: nautilusTestImagePartition} - OctopusVersion = cephv1.CephVersionSpec{Image: octopusTestImage} - PacificVersion = cephv1.CephVersionSpec{Image: pacificTestImage} - MasterVersion = cephv1.CephVersionSpec{Image: masterTestImage, AllowUnsupported: true} + NautilusVersion = cephv1.CephVersionSpec{Image: nautilusTestImage} + NautilusPartitionVersion = cephv1.CephVersionSpec{Image: nautilusTestImagePartition} + OctopusVersion = cephv1.CephVersionSpec{Image: octopusTestImage} + PacificVersion = cephv1.CephVersionSpec{Image: pacificTestImage} + MasterVersion = cephv1.CephVersionSpec{Image: masterTestImage, AllowUnsupported: true} + volumeReplicationBaseURL = fmt.Sprintf("https://raw.githubusercontent.com/csi-addons/volume-replication-operator/%s/config/crd/bases/", volumeReplicationVersion) + volumeReplicationCRDURL = volumeReplicationBaseURL + "replication.storage.openshift.io_volumereplications.yaml" + volumeReplicationClassCRDURL = volumeReplicationBaseURL + "replication.storage.openshift.io_volumereplicationclasses.yaml" ) // CephInstaller wraps installing and uninstalling rook on a platform @@ -114,6 +118,10 @@ func (h *CephInstaller) CreateCephOperator() (err error) { return errors.Errorf("Failed to start admission controllers: %v", err) } + if err := h.CreateVolumeReplicationCRDs(); err != nil { + return errors.Wrap(err, "failed to create volume replication CRDs") + } + _, err = h.k8shelper.KubectlWithStdin(h.Manifests.GetOperator(), createFromStdinArgs...) 
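Note on the ceph_helm_installer.go helpers above: CreateBlockPoolConfiguration, CreateFileSystemConfiguration, and CreateObjectStoreConfiguration only assemble nested maps that are later handed to the rook-ceph-cluster chart via InstallLocalRookHelmChart. The following is a minimal, self-contained sketch of the values layout they produce; the "spec" entries here are hypothetical placeholders (real runs unmarshal the generated CephBlockPool/CephFilesystem/CephObjectStore and StorageClass manifests into "spec" and "parameters"), and gopkg.in/yaml.v2 is used only to print the structure.

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	// Illustrative sketch only: the approximate chart values built by the
	// helm installer helpers before the rook-ceph-cluster chart is installed.
	// The names match the test constants (blockPoolName, blockPoolSCName, ...);
	// the spec contents below are stand-ins, not the real generated manifests.
	values := map[string]interface{}{
		"cephBlockPools": []map[string]interface{}{{
			"name": "ceph-block-test",
			"spec": map[string]interface{}{"failureDomain": "host"}, // placeholder spec
			"storageClass": map[string]interface{}{
				"enabled":              true,
				"isDefault":            true,
				"name":                 "ceph-block-test-sc",
				"reclaimPolicy":        "Delete",
				"allowVolumeExpansion": true,
			},
		}},
		"cephFileSystems": []map[string]interface{}{{
			"name": "ceph-filesystem-test",
			"spec": map[string]interface{}{"metadataServer": map[string]interface{}{"activeCount": 1}}, // placeholder spec
			"storageClass": map[string]interface{}{
				"enabled":       true,
				"name":          "ceph-filesystem-test-sc",
				"reclaimPolicy": "Delete",
			},
		}},
		"cephObjectStores": []map[string]interface{}{{
			"name": "ceph-objectstore-test",
			"spec": map[string]interface{}{"gateway": map[string]interface{}{"port": 8080, "instances": 2}}, // placeholder spec
			"storageClass": map[string]interface{}{
				"enabled":       true,
				"name":          "ceph-bucket-test-sc",
				"reclaimPolicy": "Delete",
			},
		}},
	}

	// Print the assembled structure as YAML to show what the chart receives.
	out, err := yaml.Marshal(values)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}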
if err != nil { return errors.Errorf("Failed to create rook-operator pod: %v", err) @@ -123,6 +131,27 @@ func (h *CephInstaller) CreateCephOperator() (err error) { return nil } +func (h *CephInstaller) CreateVolumeReplicationCRDs() (err error) { + if !h.Manifests.Settings().EnableVolumeReplication { + logger.Info("volume replication CRDs skipped") + return nil + } + if !h.k8shelper.VersionAtLeast("v1.16.0") { + logger.Info("volume replication CRDs skipped on older than k8s 1.16") + return nil + } + + logger.Info("Creating volume replication CRDs") + if _, err := h.k8shelper.KubectlWithStdin(readManifestFromURL(volumeReplicationCRDURL), createFromStdinArgs...); err != nil { + return errors.Wrap(err, "failed to create volumereplication CRD") + } + + if _, err := h.k8shelper.KubectlWithStdin(readManifestFromURL(volumeReplicationClassCRDURL), createFromStdinArgs...); err != nil { + return errors.Wrap(err, "failed to create volumereplicationclass CRD") + } + return nil +} + func (h *CephInstaller) startAdmissionController() error { if !h.k8shelper.VersionAtLeast("v1.16.0") { logger.Info("skipping the admission controller on K8s version older than v1.16") @@ -448,7 +477,7 @@ func (h *CephInstaller) installRookOperator() (bool, error) { startDiscovery = true err := h.CreateRookOperatorViaHelm(map[string]interface{}{ "enableDiscoveryDaemon": true, - "image": map[string]interface{}{"tag": "master"}, + "image": map[string]interface{}{"tag": LocalBuildTag}, }) if err != nil { return false, errors.Wrap(err, "failed to configure helm") @@ -487,7 +516,7 @@ func (h *CephInstaller) installRookOperator() (bool, error) { } func (h *CephInstaller) InstallRook() (bool, error) { - if h.settings.RookVersion != VersionMaster { + if h.settings.RookVersion != LocalBuildTag { // make sure we have the images from a previous release locally so the test doesn't hit a timeout assert.NoError(h.T(), h.k8shelper.GetDockerImage("rook/ceph:"+h.settings.RookVersion)) } @@ -507,7 +536,7 @@ func (h *CephInstaller) InstallRook() (bool, error) { if h.settings.UseHelm { err = h.CreateRookCephClusterViaHelm(map[string]interface{}{ - "image": "rook/ceph:master", + "image": "rook/ceph:" + LocalBuildTag, }) if err != nil { return false, errors.Wrap(err, "failed to install ceph cluster using Helm") @@ -553,6 +582,19 @@ func (h *CephInstaller) InstallRook() (bool, error) { time.Sleep(5 * time.Second) } + if h.settings.UseHelm { + logger.Infof("Confirming ceph cluster installed correctly") + if err := h.ConfirmHelmClusterInstalledCorrectly(); err != nil { + return false, errors.Wrap(err, "the ceph cluster storage CustomResources did not install correctly") + } + if !h.settings.RetainHelmDefaultStorageCRs { + err = h.RemoveRookCephClusterHelmDefaultCustomResources() + if err != nil { + return false, errors.Wrap(err, "failed to remove the default helm CustomResources") + } + } + } + logger.Infof("installed rook operator and cluster %s on k8s %s", h.settings.Namespace, h.k8sVersion) return true, nil @@ -621,6 +663,13 @@ func (h *CephInstaller) UninstallRookFromMultipleNS(manifests ...CephManifests) } if h.settings.UseHelm { + // helm rook-ceph-cluster cleanup + if h.settings.RetainHelmDefaultStorageCRs { + err = h.RemoveRookCephClusterHelmDefaultCustomResources() + if err != nil { + assert.Fail(h.T(), "failed to remove the default helm CustomResources") + } + } err = h.helmHelper.DeleteLocalRookHelmChart(namespace, CephClusterChartName) checkError(h.T(), err, fmt.Sprintf("cannot uninstall helm chart %s", CephClusterChartName)) } else { @@ 
-650,7 +699,7 @@ func (h *CephInstaller) UninstallRookFromMultipleNS(manifests ...CephManifests) } } - // helm cleanup + // helm operator cleanup if h.settings.UseHelm { err = h.helmHelper.DeleteLocalRookHelmChart(namespace, OperatorChartName) checkError(h.T(), err, fmt.Sprintf("cannot uninstall helm chart %s", OperatorChartName)) @@ -889,7 +938,7 @@ spec: restartPolicy: Never containers: - name: rook-cleaner - image: rook/ceph:` + VersionMaster + ` + image: rook/ceph:` + LocalBuildTag + ` securityContext: privileged: true volumeMounts: @@ -919,7 +968,7 @@ spec: restartPolicy: Never containers: - name: rook-cleaner - image: rook/ceph:` + VersionMaster + ` + image: rook/ceph:` + LocalBuildTag + ` securityContext: privileged: true volumeMounts: diff --git a/tests/framework/installer/ceph_manifests.go b/tests/framework/installer/ceph_manifests.go index 2f73a0adf46d..5c51f303bef9 100644 --- a/tests/framework/installer/ceph_manifests.go +++ b/tests/framework/installer/ceph_manifests.go @@ -18,7 +18,6 @@ package installer import ( "fmt" - "regexp" "strconv" "strings" @@ -43,7 +42,7 @@ type CephManifests interface { GetNFS(name, pool string, daemonCount int) string GetRBDMirror(name string, daemonCount int) string GetObjectStore(name string, replicaCount, port int, tlsEnable bool) string - GetObjectStoreUser(name, displayName, store string) string + GetObjectStoreUser(name, displayName, store, usercaps, maxsize string, maxbuckets, maxobjects int) string GetBucketStorageClass(storeName, storageClassName, reclaimPolicy, region string) string GetOBC(obcName, storageClassName, bucketName string, maxObject string, createBucket bool) string GetClient(name string, caps map[string]string) string @@ -57,7 +56,7 @@ type CephManifestsMaster struct { // NewCephManifests gets the manifest type depending on the Rook version desired func NewCephManifests(settings *TestCephSettings) CephManifests { switch settings.RookVersion { - case VersionMaster: + case LocalBuildTag: return &CephManifestsMaster{settings} case Version1_6: return &CephManifestsV1_6{settings} @@ -83,11 +82,7 @@ func (m *CephManifestsMaster) GetOperator() string { } else { manifest = m.settings.readManifest("operator.yaml") } - manifest = m.settings.replaceOperatorSettings(manifest) - - // In release branches replace the tag with a master build since the local build has the master tag - r, _ := regexp.Compile(`image: rook/ceph:v[a-z0-9.-]+`) - return r.ReplaceAllString(manifest, "image: rook/ceph:master") + return m.settings.replaceOperatorSettings(manifest) } func (m *CephManifestsMaster) GetCommonExternal() string { @@ -280,7 +275,8 @@ spec: size: ` + replicaSize + ` targetSizeRatio: .5 requireSafeReplicaSize: false - compressionMode: aggressive + parameters: + compression_mode: aggressive mirroring: enabled: true mode: image @@ -434,11 +430,11 @@ spec: healthCheck: bucket: disabled: false - interval: 10s + interval: 5s ` } -func (m *CephManifestsMaster) GetObjectStoreUser(name string, displayName string, store string) string { +func (m *CephManifestsMaster) GetObjectStoreUser(name, displayName, store, usercaps, maxsize string, maxbuckets, maxobjects int) string { return `apiVersion: ceph.rook.io/v1 kind: CephObjectStoreUser metadata: @@ -446,7 +442,13 @@ metadata: namespace: ` + m.settings.Namespace + ` spec: displayName: ` + displayName + ` - store: ` + store + store: ` + store + ` + quotas: + maxBuckets: ` + strconv.Itoa(maxbuckets) + ` + maxObjects: ` + strconv.Itoa(maxobjects) + ` + maxSize: ` + maxsize + ` + capabilities: + user: ` + 
usercaps } //GetBucketStorageClass returns the manifest to create object bucket diff --git a/tests/framework/installer/ceph_manifests_v1.6.go b/tests/framework/installer/ceph_manifests_v1.6.go index 0bb4da38fca5..3e67bf898708 100644 --- a/tests/framework/installer/ceph_manifests_v1.6.go +++ b/tests/framework/installer/ceph_manifests_v1.6.go @@ -231,7 +231,8 @@ spec: size: ` + replicaSize + ` targetSizeRatio: .5 requireSafeReplicaSize: false - compressionMode: aggressive + parameters: + compression_mode: aggressive mirroring: enabled: true mode: image @@ -363,7 +364,7 @@ spec: ` } -func (m *CephManifestsV1_6) GetObjectStoreUser(name string, displayName string, store string) string { +func (m *CephManifestsV1_6) GetObjectStoreUser(name, displayName, store, usercaps, maxsize string, maxbuckets, maxobjects int) string { return `apiVersion: ceph.rook.io/v1 kind: CephObjectStoreUser metadata: diff --git a/tests/framework/installer/ceph_settings.go b/tests/framework/installer/ceph_settings.go index d8b25b4416a4..bb11e2dcd6ac 100644 --- a/tests/framework/installer/ceph_settings.go +++ b/tests/framework/installer/ceph_settings.go @@ -26,27 +26,28 @@ import ( // TestCephSettings struct for handling panic and test suite tear down type TestCephSettings struct { - DataDirHostPath string - ClusterName string - Namespace string - OperatorNamespace string - StorageClassName string - UseHelm bool - UsePVC bool - Mons int - UseCrashPruner bool - MultipleMgrs bool - SkipOSDCreation bool - UseCSI bool - EnableDiscovery bool - EnableAdmissionController bool - IsExternal bool - SkipClusterCleanup bool - SkipCleanupPolicy bool - DirectMountToolbox bool - EnableVolumeReplication bool - RookVersion string - CephVersion cephv1.CephVersionSpec + DataDirHostPath string + ClusterName string + Namespace string + OperatorNamespace string + StorageClassName string + UseHelm bool + RetainHelmDefaultStorageCRs bool + UsePVC bool + Mons int + UseCrashPruner bool + MultipleMgrs bool + SkipOSDCreation bool + UseCSI bool + EnableDiscovery bool + EnableAdmissionController bool + IsExternal bool + SkipClusterCleanup bool + SkipCleanupPolicy bool + DirectMountToolbox bool + EnableVolumeReplication bool + RookVersion string + CephVersion cephv1.CephVersionSpec } func (s *TestCephSettings) ApplyEnvVars() { diff --git a/tests/framework/installer/installer.go b/tests/framework/installer/installer.go index 929b10e9097e..719a148a10d0 100644 --- a/tests/framework/installer/installer.go +++ b/tests/framework/installer/installer.go @@ -28,8 +28,8 @@ import ( ) const ( - // VersionMaster tag for the latest manifests - VersionMaster = "master" + // LocalBuildTag tag for the latest manifests + LocalBuildTag = "local-build" // test suite names CassandraTestSuite = "cassandra" diff --git a/tests/framework/installer/nfs_installer.go b/tests/framework/installer/nfs_installer.go deleted file mode 100644 index 6edfb5ac8cf0..000000000000 --- a/tests/framework/installer/nfs_installer.go +++ /dev/null @@ -1,200 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package installer - -import ( - "context" - "fmt" - "testing" - - "github.com/rook/rook/tests/framework/utils" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - nfsServerCRD = "nfsservers.nfs.rook.io" -) - -type NFSInstaller struct { - k8shelper *utils.K8sHelper - manifests *NFSManifests - T func() *testing.T -} - -func NewNFSInstaller(k8shelper *utils.K8sHelper, t func() *testing.T) *NFSInstaller { - return &NFSInstaller{k8shelper, &NFSManifests{}, t} -} - -// InstallNFSServer installs NFS operator, NFS CRD instance and NFS volume -func (h *NFSInstaller) InstallNFSServer(systemNamespace, namespace string, count int) error { - h.k8shelper.CreateAnonSystemClusterBinding() - - // install hostpath provisioner if there isn't already a default storage class - storageClassName := "" - defaultExists, err := h.k8shelper.IsDefaultStorageClassPresent() - if err != nil { - return err - } else if !defaultExists { - if err := CreateHostPathPVs(h.k8shelper, 2, false, "2Mi"); err != nil { - return err - } - } else { - logger.Info("skipping install of host path provisioner because a default storage class already exists") - } - - // install nfs operator - if err := h.CreateNFSServerOperator(systemNamespace); err != nil { - return err - } - - // install nfs server instance - if err := h.CreateNFSServer(namespace, count, storageClassName); err != nil { - return err - } - - // install nfs server volume - if err := h.CreateNFSServerVolume(namespace); err != nil { - return err - } - - return nil -} - -// CreateNFSServerOperator creates nfs server in the provided namespace -func (h *NFSInstaller) CreateNFSServerOperator(namespace string) error { - logger.Infof("starting nfsserver operator") - - logger.Info("creating nfsserver CRDs") - if _, err := h.k8shelper.KubectlWithStdin(h.manifests.GetNFSServerCRDs(), createFromStdinArgs...); err != nil { - return err - } - - nfsOperator := h.manifests.GetNFSServerOperator(namespace) - _, err := h.k8shelper.KubectlWithStdin(nfsOperator, createFromStdinArgs...) 
- if err != nil { - return fmt.Errorf("failed to create rook-nfs-operator pod: %+v ", err) - } - - if !h.k8shelper.IsCRDPresent(nfsServerCRD) { - return fmt.Errorf("failed to find nfs CRD %s", nfsServerCRD) - } - - if !h.k8shelper.IsPodInExpectedState("rook-nfs-operator", namespace, "Running") { - return fmt.Errorf("rook-nfs-operator is not running, aborting") - } - - logger.Infof("nfs operator started") - return nil -} - -// CreateNFSServer creates the NFS Server CRD instance -func (h *NFSInstaller) CreateNFSServer(namespace string, count int, storageClassName string) error { - if err := h.k8shelper.CreateNamespace(namespace); err != nil { - return err - } - - logger.Infof("starting nfs server with kubectl and yaml") - nfsServer := h.manifests.GetNFSServer(namespace, count, storageClassName) - if _, err := h.k8shelper.KubectlWithStdin(nfsServer, createFromStdinArgs...); err != nil { - return fmt.Errorf("Failed to create nfs server: %+v ", err) - } - - if err := h.k8shelper.WaitForPodCount("app="+namespace, namespace, 1); err != nil { - logger.Errorf("nfs server pods in namespace %s not found", namespace) - return err - } - - err := h.k8shelper.WaitForLabeledPodsToRun("app="+namespace, namespace) - if err != nil { - logger.Errorf("nfs server pods in namespace %s are not running", namespace) - return err - } - - logger.Infof("nfs server started") - return nil -} - -// CreateNFSServerVolume creates NFS export PV and PVC -func (h *NFSInstaller) CreateNFSServerVolume(namespace string) error { - logger.Info("creating volume from nfs server in namespace %s", namespace) - - nfsServerPVC := h.manifests.GetNFSServerPVC(namespace) - - logger.Info("creating nfs server pvc") - if _, err := h.k8shelper.KubectlWithStdin(nfsServerPVC, createFromStdinArgs...); err != nil { - return err - } - - return nil -} - -// UninstallNFSServer uninstalls the NFS Server from the given namespace -func (h *NFSInstaller) UninstallNFSServer(systemNamespace, namespace string) { - ctx := context.TODO() - logger.Infof("uninstalling nfsserver from namespace %s", namespace) - - err := h.k8shelper.DeleteResource("pvc", "nfs-pv-claim") - checkError(h.T(), err, "cannot remove nfs pvc : nfs-pv-claim") - - err = h.k8shelper.DeleteResource("pvc", "nfs-pv-claim-bigger") - checkError(h.T(), err, "cannot remove nfs pvc : nfs-pv-claim-bigger") - - err = h.k8shelper.DeleteResource("pv", "nfs-pv") - checkError(h.T(), err, "cannot remove nfs pv : nfs-pv") - - err = h.k8shelper.DeleteResource("pv", "nfs-pv1") - checkError(h.T(), err, "cannot remove nfs pv : nfs-pv1") - - err = h.k8shelper.DeleteResource("-n", namespace, "nfsservers.nfs.rook.io", namespace) - checkError(h.T(), err, fmt.Sprintf("cannot remove nfsserver %s", namespace)) - - crdCheckerFunc := func() error { - _, err := h.k8shelper.RookClientset.NfsV1alpha1().NFSServers(namespace).Get(ctx, namespace, metav1.GetOptions{}) - return err - } - err = h.k8shelper.WaitForCustomResourceDeletion(namespace, namespace, crdCheckerFunc) - checkError(h.T(), err, fmt.Sprintf("failed to wait for crd %s deletion", namespace)) - - err = h.k8shelper.DeleteResource("namespace", namespace) - checkError(h.T(), err, fmt.Sprintf("cannot delete namespace %s", namespace)) - - logger.Infof("removing the operator from namespace %s", systemNamespace) - err = h.k8shelper.DeleteResource("crd", "nfsservers.nfs.rook.io") - checkError(h.T(), err, "cannot delete CRDs") - - nfsOperator := h.manifests.GetNFSServerOperator(systemNamespace) - _, err = h.k8shelper.KubectlWithStdin(nfsOperator, deleteFromStdinArgs...) 
- checkError(h.T(), err, "cannot uninstall rook-nfs-operator") - - err = DeleteHostPathPVs(h.k8shelper) - checkError(h.T(), err, "cannot uninstall hostpath provisioner") - - h.k8shelper.Clientset.RbacV1().ClusterRoleBindings().Delete(ctx, "anon-user-access", metav1.DeleteOptions{}) //nolint // asserting this failing in CI - h.k8shelper.Clientset.RbacV1().ClusterRoleBindings().Delete(ctx, "run-nfs-client-provisioner", metav1.DeleteOptions{}) //nolint // asserting this failing in CI - h.k8shelper.Clientset.RbacV1().ClusterRoles().Delete(ctx, "nfs-client-provisioner-runner", metav1.DeleteOptions{}) //nolint // asserting this failing in CI - logger.Infof("done removing the operator from namespace %s", systemNamespace) -} - -// GatherAllNFSServerLogs gathers all NFS Server logs -func (h *NFSInstaller) GatherAllNFSServerLogs(systemNamespace, namespace, testName string) { - if !h.T().Failed() && TestLogCollectionLevel() != "all" { - return - } - logger.Infof("Gathering all logs from NFSServer %s", namespace) - h.k8shelper.GetLogsFromNamespace(systemNamespace, testName, utils.TestEnvName()) - h.k8shelper.GetLogsFromNamespace(namespace, testName, utils.TestEnvName()) -} diff --git a/tests/framework/installer/nfs_manifests.go b/tests/framework/installer/nfs_manifests.go deleted file mode 100644 index b5a270271ab8..000000000000 --- a/tests/framework/installer/nfs_manifests.go +++ /dev/null @@ -1,240 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package installer - -import ( - "strconv" - "strings" -) - -type NFSManifests struct { -} - -// GetNFSServerCRDs returns NFSServer CRD definition -func (n *NFSManifests) GetNFSServerCRDs() string { - manifest := readManifest("nfs", "crds.yaml") - logger.Info(manifest) - return manifest -} - -// GetNFSServerOperator returns the NFSServer operator definition -func (n *NFSManifests) GetNFSServerOperator(namespace string) string { - manifest := readManifest("nfs", "operator.yaml") - manifest = strings.ReplaceAll(manifest, "rook-nfs-system # namespace:operator", namespace) - return manifest -} - -// GetNFSServerPV returns NFSServer PV definition -func (n *NFSManifests) GetNFSServerPV(namespace string, clusterIP string) string { - return `apiVersion: v1 -kind: PersistentVolume -metadata: - name: nfs-pv - namespace: ` + namespace + ` - annotations: - volume.beta.kubernetes.io/mount-options: "vers=4.1" -spec: - storageClassName: nfs-sc - capacity: - storage: 1Mi - accessModes: - - ReadWriteMany - nfs: - server: ` + clusterIP + ` - path: "/test-claim" ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: nfs-pv1 - namespace: ` + namespace + ` - annotations: - volume.beta.kubernetes.io/mount-options: "vers=4.1" -spec: - storageClassName: nfs-sc - capacity: - storage: 2Mi - accessModes: - - ReadWriteMany - nfs: - server: ` + clusterIP + ` - path: "/test-claim1" -` -} - -// GetNFSServerPVC returns NFSServer PVC definition -func (n *NFSManifests) GetNFSServerPVC(namespace string) string { - return ` ---- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - labels: - app: rook-nfs - name: nfs-ns-nfs-share -parameters: - exportName: nfs-share - nfsServerName: ` + namespace + ` - nfsServerNamespace: ` + namespace + ` -provisioner: nfs.rook.io/` + namespace + `-provisioner -reclaimPolicy: Delete -volumeBindingMode: Immediate ---- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - labels: - app: rook-nfs - name: nfs-ns-nfs-share1 -parameters: - exportName: nfs-share1 - nfsServerName: ` + namespace + ` - nfsServerNamespace: ` + namespace + ` -provisioner: nfs.rook.io/` + namespace + `-provisioner -reclaimPolicy: Delete -volumeBindingMode: Immediate ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: nfs-pv-claim -spec: - storageClassName: nfs-ns-nfs-share - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Mi ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: nfs-pv-claim-bigger -spec: - storageClassName: nfs-ns-nfs-share1 - accessModes: - - ReadWriteMany - resources: - requests: - storage: 2Mi -` -} - -// GetNFSServer returns NFSServer CRD instance definition -func (n *NFSManifests) GetNFSServer(namespace string, count int, storageClassName string) string { - return ` -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-nfs-server - namespace: ` + namespace + ` ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-nfs-provisioner-runner -rules: - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "create", "delete"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["create", "update", "patch"] - - apiGroups: [""] - resources: ["services", "endpoints"] - verbs: ["get"] - - apiGroups: ["extensions"] - resources: 
["podsecuritypolicies"] - resourceNames: ["nfs-provisioner"] - verbs: ["use"] - - apiGroups: [""] - resources: ["endpoints"] - verbs: ["get", "list", "watch", "create", "update", "patch"] - - apiGroups: - - nfs.rook.io - resources: - - "*" - verbs: - - "*" ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: run-nfs-provisioner -subjects: - - kind: ServiceAccount - name: rook-nfs-server - namespace: ` + namespace + ` -roleRef: - kind: ClusterRole - name: rook-nfs-provisioner-runner - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: test-claim - namespace: ` + namespace + ` -spec: - storageClassName: ` + storageClassName + ` - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Mi ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: test-claim1 - namespace: ` + namespace + ` -spec: - storageClassName: ` + storageClassName + ` - accessModes: - - ReadWriteMany - resources: - requests: - storage: 2Mi ---- -apiVersion: nfs.rook.io/v1alpha1 -kind: NFSServer -metadata: - name: ` + namespace + ` - namespace: ` + namespace + ` -spec: - replicas: ` + strconv.Itoa(count) + ` - exports: - - name: nfs-share - server: - accessMode: ReadWrite - squash: "none" - persistentVolumeClaim: - claimName: test-claim - - name: nfs-share1 - server: - accessMode: ReadWrite - squash: "none" - persistentVolumeClaim: - claimName: test-claim1 -` -} diff --git a/tests/framework/installer/settings.go b/tests/framework/installer/settings.go index 033cf957a4f4..4b268b2fbf09 100644 --- a/tests/framework/installer/settings.go +++ b/tests/framework/installer/settings.go @@ -21,12 +21,15 @@ import ( "io/ioutil" "net/http" "path" + "regexp" "time" "github.com/pkg/errors" "github.com/rook/rook/tests/framework/utils" ) +var imageMatch = regexp.MustCompile(`image: rook\/ceph:[a-z0-9.-]+`) + func readManifest(provider, filename string) string { rootDir, err := utils.FindRookRoot() if err != nil { @@ -38,11 +41,15 @@ func readManifest(provider, filename string) string { if err != nil { panic(errors.Wrapf(err, "failed to read manifest at %s", manifest)) } - return string(contents) + return imageMatch.ReplaceAllString(string(contents), "image: rook/ceph:"+LocalBuildTag) } func readManifestFromGithub(rookVersion, provider, filename string) string { url := fmt.Sprintf("https://raw.githubusercontent.com/rook/rook/%s/cluster/examples/kubernetes/%s/%s", rookVersion, provider, filename) + return readManifestFromURL(url) +} + +func readManifestFromURL(url string) string { logger.Infof("Retrieving manifest: %s", url) var response *http.Response var err error diff --git a/tests/framework/utils/env.go b/tests/framework/utils/env.go index 9d26ad41c824..439bfc749063 100644 --- a/tests/framework/utils/env.go +++ b/tests/framework/utils/env.go @@ -29,7 +29,7 @@ func TestEnvName() string { // TestRetryNumber get the max retry. Example, for OpenShift it's 40. 
func TestRetryNumber() int { - count := GetEnvVarWithDefault("RETRY_MAX", "30") + count := GetEnvVarWithDefault("RETRY_MAX", "45") number, err := strconv.Atoi(count) if err != nil { panic(fmt.Errorf("Error when converting to numeric value %v", err)) diff --git a/tests/integration/ceph_base_file_test.go b/tests/integration/ceph_base_file_test.go index f2acba483cb3..2f47439268fb 100644 --- a/tests/integration/ceph_base_file_test.go +++ b/tests/integration/ceph_base_file_test.go @@ -385,9 +385,24 @@ func createFilesystem(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite logger.Infof("Create file System") fscErr := helper.FSClient.Create(filesystemName, settings.Namespace, activeCount) require.Nil(s.T(), fscErr) - logger.Infof("File system %s created", filesystemName) + var err error - filesystemList, _ := helper.FSClient.List(settings.Namespace) + var filesystemList []cephclient.CephFilesystem + for i := 1; i <= 10; i++ { + filesystemList, err = helper.FSClient.List(settings.Namespace) + if err != nil { + logger.Errorf("failed to list fs. trying again. %v", err) + continue + } + logger.Debugf("filesystemList is %+v", filesystemList) + if len(filesystemList) == 1 { + logger.Infof("File system %s created", filesystemList[0].Name) + break + } + logger.Infof("Waiting for file system %s to be created", filesystemName) + time.Sleep(time.Second * 5) + } + logger.Debugf("filesystemList is %+v", filesystemList) require.Equal(s.T(), 1, len(filesystemList), "There should be one shared file system present") } diff --git a/tests/integration/ceph_base_object_test.go b/tests/integration/ceph_base_object_test.go index 99adf7deca59..39f15daff4b8 100644 --- a/tests/integration/ceph_base_object_test.go +++ b/tests/integration/ceph_base_object_test.go @@ -23,6 +23,7 @@ import ( "io/ioutil" "os" "path/filepath" + "strconv" "testing" "time" @@ -48,11 +49,16 @@ var ( ObjectKey1 = "rookObj1" ObjectKey2 = "rookObj2" ObjectKey3 = "rookObj3" + ObjectKey4 = "rookObj4" contentType = "plain/text" obcName = "smoke-delete-bucket" region = "us-east-1" maxObject = "2" + newMaxObject = "3" bucketStorageClassName = "rook-smoke-delete-bucket" + maxBucket = 1 + maxSize = "100000" + userCap = "read" ) // Smoke Test for ObjectStore - Test check the following operations on ObjectStore in order @@ -60,16 +66,17 @@ var ( // Check issues in MGRs, Delete Bucket and Delete user // Test for ObjectStore with and without TLS enabled func runObjectE2ETest(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, namespace string) { - storeName := "teststore" - logger.Info("Object Storage End To End Integration Test without TLS - Create Object Store, User,Bucket and read/write to bucket") - logger.Info("Running on Rook Cluster %s", namespace) - createCephObjectStore(s, helper, k8sh, namespace, storeName, 3, false) - testObjectStoreOperations(s, helper, k8sh, namespace, storeName) - - storeName = "tlsteststore" + storeName := "tlsteststore" logger.Info("Object Storage End To End Integration Test with TLS enabled - Create Object Store, User,Bucket and read/write to bucket") + logger.Infof("Running on Rook Cluster %s", namespace) createCephObjectStore(s, helper, k8sh, namespace, storeName, 3, true) testObjectStoreOperations(s, helper, k8sh, namespace, storeName) + + storeName = "teststore" + logger.Info("Object Storage End To End Integration Test without TLS - Create Object Store, User,Bucket and read/write to bucket") + logger.Infof("Running on Rook Cluster %s", namespace) + createCephObjectStore(s, helper, k8sh, namespace, 
storeName, 3, false) + testObjectStoreOperations(s, helper, k8sh, namespace, storeName) } // Test Object StoreCreation on Rook that was installed via helm @@ -108,11 +115,11 @@ func objectStoreCleanUp(s suite.Suite, helper *clients.TestClient, k8sh *utils.K func createCephObjectUser( s suite.Suite, helper *clients.TestClient, k8sh *utils.K8sHelper, namespace, storeName, userID string, - checkPhase bool, -) { + checkPhase, checkQuotaAndCaps bool) { s.T().Helper() - - cosuErr := helper.ObjectUserClient.Create(namespace, userID, userdisplayname, storeName) + maxObjectInt, err := strconv.Atoi(maxObject) + assert.Nil(s.T(), err) + cosuErr := helper.ObjectUserClient.Create(userID, userdisplayname, storeName, userCap, maxSize, maxBucket, maxObjectInt) assert.Nil(s.T(), cosuErr) logger.Infof("Waiting 5 seconds for the object user to be created") time.Sleep(5 * time.Second) @@ -122,13 +129,13 @@ func createCephObjectUser( time.Sleep(5 * time.Second) } - checkCephObjectUser(s, helper, k8sh, namespace, storeName, userID, checkPhase) + checkCephObjectUser(s, helper, k8sh, namespace, storeName, userID, checkPhase, checkQuotaAndCaps) } func checkCephObjectUser( s suite.Suite, helper *clients.TestClient, k8sh *utils.K8sHelper, namespace, storeName, userID string, - checkPhase bool, + checkPhase, checkQuotaAndCaps bool, ) { s.T().Helper() @@ -146,6 +153,17 @@ func checkCephObjectUser( assert.NoError(s.T(), err) assert.Equal(s.T(), k8sutil.ReadyStatus, phase) } + if checkQuotaAndCaps { + // following fields in CephObjectStoreUser CRD doesn't exist before Rook v1.7.4 + maxObjectInt, err := strconv.Atoi(maxObject) + assert.Nil(s.T(), err) + maxSizeInt, err := strconv.Atoi(maxSize) + assert.Nil(s.T(), err) + assert.Equal(s.T(), maxBucket, userInfo.MaxBuckets) + assert.Equal(s.T(), int64(maxObjectInt), *userInfo.UserQuota.MaxObjects) + assert.Equal(s.T(), int64(maxSizeInt), *userInfo.UserQuota.MaxSize) + assert.Equal(s.T(), userCap, userInfo.Caps[0].Perm) + } } func createCephObjectStore(s suite.Suite, helper *clients.TestClient, k8sh *utils.K8sHelper, namespace, storeName string, replicaSize int, tlsEnable bool) { @@ -167,6 +185,7 @@ func createCephObjectStore(s suite.Suite, helper *clients.TestClient, k8sh *util } assert.True(s.T(), k8sh.CheckPodCountAndState("rook-ceph-rgw", namespace, 1, "Running")) logger.Info("RGW pods are running") + assert.NoError(t, k8sh.WaitForLabeledDeploymentsToBeReady("app=rook-ceph-rgw", namespace)) logger.Infof("Object store %q created successfully", storeName) }) } @@ -175,8 +194,8 @@ func testObjectStoreOperations(s suite.Suite, helper *clients.TestClient, k8sh * ctx := context.TODO() clusterInfo := client.AdminClusterInfo(namespace) t := s.T() - t.Run("create CephObjectStoreUser", func(t *testing.T) { - createCephObjectUser(s, helper, k8sh, namespace, storeName, userid, true) + t.Run(fmt.Sprintf("create CephObjectStoreUser %q", storeName), func(t *testing.T) { + createCephObjectUser(s, helper, k8sh, namespace, storeName, userid, true, true) i := 0 for i = 0; i < 4; i++ { if helper.ObjectUserClient.UserSecretExists(namespace, storeName, userid) { @@ -189,13 +208,20 @@ func testObjectStoreOperations(s suite.Suite, helper *clients.TestClient, k8sh * }) // Check object store status - t.Run("verify CephObjectStore status", func(t *testing.T) { + t.Run(fmt.Sprintf("verify ceph object store %q status", storeName), func(t *testing.T) { + retryCount := 30 i := 0 - for i = 0; i < 4; i++ { + for i = 0; i < retryCount; i++ { objectStore, err := 
k8sh.RookClientset.CephV1().CephObjectStores(namespace).Get(ctx, storeName, metav1.GetOptions{}) assert.Nil(s.T(), err) if objectStore.Status == nil || objectStore.Status.BucketStatus == nil { - logger.Infof("(%d) bucket status check sleeping for 5 seconds ...", i) + logger.Infof("(%d) object status check sleeping for 5 seconds ...%+v", i, objectStore.Status) + time.Sleep(5 * time.Second) + continue + } + logger.Info("objectstore status is", objectStore.Status) + if objectStore.Status.BucketStatus.Health == cephv1.ConditionFailure { + logger.Infof("(%d) bucket status check sleeping for 5 seconds ...%+v", i, objectStore.Status.BucketStatus) time.Sleep(5 * time.Second) continue } @@ -205,7 +231,9 @@ func testObjectStoreOperations(s suite.Suite, helper *clients.TestClient, k8sh * assert.NotEmpty(s.T(), objectStore.Status.Info["endpoint"]) break } - assert.NotEqual(t, 4, i) + if i == retryCount { + t.Fatal("bucket status check failed. status is not connected") + } }) context := k8sh.MakeContext() @@ -248,9 +276,9 @@ func testObjectStoreOperations(s suite.Suite, helper *clients.TestClient, k8sh * s3AccessKey, _ := helper.BucketClient.GetAccessKey(obcName) s3SecretKey, _ := helper.BucketClient.GetSecretKey(obcName) if objectStore.Spec.IsTLSEnabled() { - s3client, err = rgw.NewTestOnlyS3Agent(s3AccessKey, s3SecretKey, s3endpoint, true) + s3client, err = rgw.NewInsecureS3Agent(s3AccessKey, s3SecretKey, s3endpoint, region, true) } else { - s3client, err = rgw.NewS3Agent(s3AccessKey, s3SecretKey, s3endpoint, true, nil) + s3client, err = rgw.NewS3Agent(s3AccessKey, s3SecretKey, s3endpoint, region, true, nil) } assert.Nil(s.T(), err) @@ -275,11 +303,27 @@ func testObjectStoreOperations(s suite.Suite, helper *clients.TestClient, k8sh * assert.Error(s.T(), poErr) }) + t.Run("test update quota on OBC bucket", func(t *testing.T) { + poErr := helper.BucketClient.UpdateObc(obcName, bucketStorageClassName, bucketname, newMaxObject, true) + assert.Nil(s.T(), poErr) + updated := utils.Retry(5, 2*time.Second, "OBC is updated", func() bool { + return helper.BucketClient.CheckOBMaxObject(obcName, newMaxObject) + }) + assert.True(s.T(), updated) + logger.Infof("Testing the updated object limit") + _, poErr = s3client.PutObjectInBucket(bucketname, ObjBody, ObjectKey3, contentType) + assert.NoError(s.T(), poErr) + _, poErr = s3client.PutObjectInBucket(bucketname, ObjBody, ObjectKey4, contentType) + assert.Error(s.T(), poErr) + }) + t.Run("delete objects on OBC bucket", func(t *testing.T) { _, delobjErr := s3client.DeleteObjectInBucket(bucketname, ObjectKey1) assert.Nil(s.T(), delobjErr) _, delobjErr = s3client.DeleteObjectInBucket(bucketname, ObjectKey2) assert.Nil(s.T(), delobjErr) + _, delobjErr = s3client.DeleteObjectInBucket(bucketname, ObjectKey3) + assert.Nil(s.T(), delobjErr) logger.Info("Objects deleted on bucket successfully") }) }) diff --git a/tests/integration/ceph_flex_test.go b/tests/integration/ceph_flex_test.go index 93caa1e3fb80..443df67b5cbb 100644 --- a/tests/integration/ceph_flex_test.go +++ b/tests/integration/ceph_flex_test.go @@ -94,7 +94,7 @@ func (s *CephFlexDriverSuite) SetupSuite() { SkipOSDCreation: false, UseCSI: false, DirectMountToolbox: true, - RookVersion: installer.VersionMaster, + RookVersion: installer.LocalBuildTag, CephVersion: installer.OctopusVersion, } s.settings.ApplyEnvVars() diff --git a/tests/integration/ceph_helm_test.go b/tests/integration/ceph_helm_test.go index 6efe9f89adff..b0a1f5d741e9 100644 --- a/tests/integration/ceph_helm_test.go +++ 
b/tests/integration/ceph_helm_test.go @@ -73,7 +73,7 @@ func (h *HelmSuite) SetupSuite() { SkipOSDCreation: false, EnableAdmissionController: false, EnableDiscovery: true, - RookVersion: installer.VersionMaster, + RookVersion: installer.LocalBuildTag, CephVersion: installer.OctopusVersion, } h.settings.ApplyEnvVars() diff --git a/tests/integration/ceph_mgr_test.go b/tests/integration/ceph_mgr_test.go index 3f03ebb02815..406386238a2c 100644 --- a/tests/integration/ceph_mgr_test.go +++ b/tests/integration/ceph_mgr_test.go @@ -101,7 +101,7 @@ func (s *CephMgrSuite) SetupSuite() { Mons: 1, UseCSI: true, SkipOSDCreation: true, - RookVersion: installer.VersionMaster, + RookVersion: installer.LocalBuildTag, CephVersion: installer.MasterVersion, } s.settings.ApplyEnvVars() diff --git a/tests/integration/ceph_multi_cluster_test.go b/tests/integration/ceph_multi_cluster_test.go index 4959678c83ab..85103769851d 100644 --- a/tests/integration/ceph_multi_cluster_test.go +++ b/tests/integration/ceph_multi_cluster_test.go @@ -87,7 +87,7 @@ func (s *MultiClusterDeploySuite) SetupSuite() { UseCSI: true, MultipleMgrs: true, EnableAdmissionController: true, - RookVersion: installer.VersionMaster, + RookVersion: installer.LocalBuildTag, CephVersion: installer.NautilusVersion, } s.settings.ApplyEnvVars() diff --git a/tests/integration/ceph_smoke_test.go b/tests/integration/ceph_smoke_test.go index 95074361433c..052d37f2afdd 100644 --- a/tests/integration/ceph_smoke_test.go +++ b/tests/integration/ceph_smoke_test.go @@ -99,14 +99,12 @@ func (s *SmokeSuite) SetupSuite() { UseCSI: true, EnableAdmissionController: true, UseCrashPruner: true, - RookVersion: installer.VersionMaster, + EnableVolumeReplication: true, + RookVersion: installer.LocalBuildTag, CephVersion: installer.PacificVersion, } s.settings.ApplyEnvVars() s.installer, s.k8sh = StartTestCluster(s.T, s.settings, smokeSuiteMinimalTestVersion) - if s.k8sh.VersionAtLeast("v1.16.0") { - s.settings.EnableVolumeReplication = true - } s.helper = clients.CreateTestClient(s.k8sh, s.installer.Manifests) } diff --git a/tests/integration/ceph_upgrade_test.go b/tests/integration/ceph_upgrade_test.go index e7f2a0c88ed9..4cec18485353 100644 --- a/tests/integration/ceph_upgrade_test.go +++ b/tests/integration/ceph_upgrade_test.go @@ -135,7 +135,7 @@ func (s *UpgradeSuite) TestUpgradeToMaster() { logger.Infof("Initializing object user before the upgrade") objectUserID := "upgraded-user" - createCephObjectUser(s.Suite, s.helper, s.k8sh, s.namespace, objectStoreName, objectUserID, false) + createCephObjectUser(s.Suite, s.helper, s.k8sh, s.namespace, objectStoreName, objectUserID, false, false) logger.Info("Initializing object bucket claim before the upgrade") bucketStorageClassName := "rook-smoke-delete-bucket" @@ -183,7 +183,7 @@ func (s *UpgradeSuite) TestUpgradeToMaster() { s.gatherLogs(s.settings.OperatorNamespace, "_before_master_upgrade") s.upgradeToMaster() - s.verifyOperatorImage(installer.VersionMaster) + s.verifyOperatorImage(installer.LocalBuildTag) s.verifyRookUpgrade(numOSDs) err = s.installer.WaitForToolbox(s.namespace) assert.NoError(s.T(), err) @@ -194,7 +194,7 @@ func (s *UpgradeSuite) TestUpgradeToMaster() { rbdFilesToRead = append(rbdFilesToRead, newFile) cephfsFilesToRead = append(cephfsFilesToRead, newFile) - checkCephObjectUser(s.Suite, s.helper, s.k8sh, s.namespace, objectStoreName, objectUserID, true) + checkCephObjectUser(s.Suite, s.helper, s.k8sh, s.namespace, objectStoreName, objectUserID, true, false) // should be Bound after upgrade to Rook 
master // do not need retry b/c the OBC controller runs parallel to Rook-Ceph orchestration @@ -213,7 +213,7 @@ func (s *UpgradeSuite) TestUpgradeToMaster() { s.verifyFilesAfterUpgrade(filesystemName, newFile, message, rbdFilesToRead, cephfsFilesToRead) logger.Infof("Verified upgrade from nautilus to octopus") - checkCephObjectUser(s.Suite, s.helper, s.k8sh, s.namespace, objectStoreName, objectUserID, true) + checkCephObjectUser(s.Suite, s.helper, s.k8sh, s.namespace, objectStoreName, objectUserID, true, false) // // Upgrade from octopus to pacific @@ -226,7 +226,7 @@ func (s *UpgradeSuite) TestUpgradeToMaster() { s.verifyFilesAfterUpgrade(filesystemName, newFile, message, rbdFilesToRead, cephfsFilesToRead) logger.Infof("Verified upgrade from octopus to pacific") - checkCephObjectUser(s.Suite, s.helper, s.k8sh, s.namespace, objectStoreName, objectUserID, true) + checkCephObjectUser(s.Suite, s.helper, s.k8sh, s.namespace, objectStoreName, objectUserID, true, false) } func (s *UpgradeSuite) gatherLogs(systemNamespace, testSuffix string) { @@ -359,15 +359,15 @@ func (s *UpgradeSuite) verifyFilesAfterUpgrade(fsName, newFileToWrite, messageFo // verify the upgrade but merely starts the upgrade process. func (s *UpgradeSuite) upgradeToMaster() { // Apply the CRDs for the latest master - s.settings.RookVersion = installer.VersionMaster + s.settings.RookVersion = installer.LocalBuildTag m := installer.NewCephManifests(s.settings) require.NoError(s.T(), s.k8sh.ResourceOperation("apply", m.GetCRDs(s.k8sh))) require.NoError(s.T(), s.k8sh.ResourceOperation("apply", m.GetCommon())) require.NoError(s.T(), - s.k8sh.SetDeploymentVersion(s.settings.OperatorNamespace, operatorContainer, operatorContainer, installer.VersionMaster)) + s.k8sh.SetDeploymentVersion(s.settings.OperatorNamespace, operatorContainer, operatorContainer, installer.LocalBuildTag)) require.NoError(s.T(), - s.k8sh.SetDeploymentVersion(s.settings.Namespace, "rook-ceph-tools", "rook-ceph-tools", installer.VersionMaster)) + s.k8sh.SetDeploymentVersion(s.settings.Namespace, "rook-ceph-tools", "rook-ceph-tools", installer.LocalBuildTag)) } diff --git a/tests/integration/nfs_test.go b/tests/integration/nfs_test.go deleted file mode 100644 index 9dc6e33fdeb4..000000000000 --- a/tests/integration/nfs_test.go +++ /dev/null @@ -1,158 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package integration - -import ( - "fmt" - "testing" - "time" - - "github.com/rook/rook/tests/framework/clients" - "github.com/rook/rook/tests/framework/installer" - "github.com/rook/rook/tests/framework/utils" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - "k8s.io/apimachinery/pkg/util/version" -) - -// ******************************************************* -// *** Major scenarios tested by the NfsSuite *** -// Setup -// - via the server CRD with very simple properties -// - 1 replica -// - Default server permissions -// - Mount a NFS export and write data to it and verify -// ******************************************************* -func TestNfsSuite(t *testing.T) { - if installer.SkipTestSuite(installer.NFSTestSuite) { - t.Skip() - } - - s := new(NfsSuite) - defer func(s *NfsSuite) { - HandlePanics(recover(), s.Teardown, s.T) - }(s) - suite.Run(t, s) -} - -type NfsSuite struct { - suite.Suite - k8shelper *utils.K8sHelper - installer *installer.NFSInstaller - rwClient *clients.ReadWriteOperation - namespace string - systemNamespace string - instanceCount int -} - -func (s *NfsSuite) SetupSuite() { - s.Setup() -} - -func (s *NfsSuite) TearDownSuite() { - s.Teardown() -} - -func (s *NfsSuite) Setup() { - s.namespace = "rook-nfs" - s.systemNamespace = installer.SystemNamespace(s.namespace) - s.instanceCount = 1 - - k8shelper, err := utils.CreateK8sHelper(s.T) - v := version.MustParseSemantic(k8shelper.GetK8sServerVersion()) - if !v.AtLeast(version.MustParseSemantic("1.14.0")) { - logger.Info("Skipping NFS tests when not at least K8s v1.14") - s.T().Skip() - } - - require.NoError(s.T(), err) - s.k8shelper = k8shelper - - k8sversion := s.k8shelper.GetK8sServerVersion() - logger.Infof("Installing nfs server on k8s %s", k8sversion) - - s.installer = installer.NewNFSInstaller(s.k8shelper, s.T) - - s.rwClient = clients.CreateReadWriteOperation(s.k8shelper) - - err = s.installer.InstallNFSServer(s.systemNamespace, s.namespace, s.instanceCount) - if err != nil { - logger.Errorf("nfs server installation failed: %+v", err) - s.T().Fail() - s.Teardown() - s.T().FailNow() - } -} - -func (s *NfsSuite) Teardown() { - s.installer.GatherAllNFSServerLogs(s.systemNamespace, s.namespace, s.T().Name()) - s.installer.UninstallNFSServer(s.systemNamespace, s.namespace) -} - -func (s *NfsSuite) TestNfsServerInstallation() { - logger.Infof("Verifying that nfs server pod %s is running", s.namespace) - - // verify nfs server operator is running OK - assert.True(s.T(), s.k8shelper.CheckPodCountAndState("rook-nfs-operator", s.systemNamespace, 1, "Running"), - "1 rook-nfs-operator must be in Running state") - - // verify nfs server instances are running OK - assert.True(s.T(), s.k8shelper.CheckPodCountAndState(s.namespace, s.namespace, s.instanceCount, "Running"), - fmt.Sprintf("%d rook-nfs pods must be in Running state", s.instanceCount)) - - // verify bigger export is running OK - assert.True(s.T(), true, s.k8shelper.WaitUntilPVCIsBound("default", "nfs-pv-claim-bigger")) - - podList, err := s.rwClient.CreateWriteClient("nfs-pv-claim-bigger") - require.NoError(s.T(), err) - assert.True(s.T(), true, s.checkReadData(podList)) - err = s.rwClient.Delete() - assert.NoError(s.T(), err) - - // verify another smaller export is running OK - assert.True(s.T(), true, s.k8shelper.WaitUntilPVCIsBound("default", "nfs-pv-claim")) - - defer s.rwClient.Delete() //nolint // delete a nfs consuming pod in rook - podList, err = 
s.rwClient.CreateWriteClient("nfs-pv-claim") - require.NoError(s.T(), err) - assert.True(s.T(), true, s.checkReadData(podList)) -} - -func (s *NfsSuite) checkReadData(podList []string) bool { - var result string - var err error - // the following for loop retries to read data from the first pod in the pod list - for i := 0; i < utils.RetryLoop; i++ { - // the nfs volume is mounted on "/mnt" and the data(hostname of the pod) is written in "/mnt/data" of the pod - // results stores the hostname of either one of the pod which is same as the pod name, which is read from "/mnt/data" - result, err = s.rwClient.Read(podList[0]) - logger.Infof("nfs volume read exited, err: %+v. result: %s", err, result) - if err == nil { - break - } - logger.Warning("nfs volume read failed, will try again") - time.Sleep(utils.RetryInterval * time.Second) - } - require.NoError(s.T(), err) - // the value of result must be same as the name of pod. - if result == podList[0] || result == podList[1] { - return true - } - - return false -} diff --git a/tests/integration/z_cassandra_test.go b/tests/integration/z_cassandra_test.go deleted file mode 100644 index d0eb334ba6ad..000000000000 --- a/tests/integration/z_cassandra_test.go +++ /dev/null @@ -1,227 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package integration - -import ( - "context" - "os" - "strings" - "testing" - "time" - - cassandrav1alpha1 "github.com/rook/rook/pkg/apis/cassandra.rook.io/v1alpha1" - "github.com/rook/rook/tests/framework/installer" - "github.com/rook/rook/tests/framework/utils" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// ************************************************ -// *** Major scenarios tested by the CassandraSuite *** -// Setup -// - via the cluster CRD with very simple properties -// - 1 replica -// - 1 CPU -// - 2GB memory -// - 5Gi volume from default provider -// ************************************************ - -type CassandraSuite struct { - suite.Suite - k8sHelper *utils.K8sHelper - installer *installer.CassandraInstaller - namespace string - systemNamespace string - instanceCount int -} - -// TestCassandraSuite initiates the CassandraSuite -func TestCassandraSuite(t *testing.T) { - if installer.SkipTestSuite(installer.CassandraTestSuite) { - t.Skip() - } - if os.Getenv("SKIP_CASSANDRA_TESTS") == "true" { - t.Skip() - } - - s := new(CassandraSuite) - defer func(s *CassandraSuite) { - r := recover() - if r != nil { - logger.Infof("unexpected panic occurred during test %s, --> %v", t.Name(), r) - t.Fail() - s.Teardown() - t.FailNow() - } - }(s) - suite.Run(t, s) -} - -// SetupSuite runs once at the beginning of the suite, -// before any tests are run. 
-func (s *CassandraSuite) SetupSuite() { - - s.namespace = "cassandra-ns" - s.systemNamespace = installer.SystemNamespace(s.namespace) - s.instanceCount = 1 - - k8sHelper, err := utils.CreateK8sHelper(s.T) - require.NoError(s.T(), err) - s.k8sHelper = k8sHelper - - k8sVersion := s.k8sHelper.GetK8sServerVersion() - logger.Infof("Installing Cassandra on K8s %s", k8sVersion) - - s.installer = installer.NewCassandraInstaller(s.k8sHelper, s.T) - - if err = s.installer.InstallCassandra(s.systemNamespace, s.namespace, s.instanceCount, cassandrav1alpha1.ClusterModeCassandra); err != nil { - logger.Errorf("Cassandra was not installed successfully: %s", err.Error()) - s.T().Fail() - s.Teardown() - s.T().FailNow() - } -} - -// BeforeTest runs before every test in the CassandraSuite. -func (s *CassandraSuite) TeardownSuite() { - s.Teardown() -} - -/////////// -// Tests // -/////////// - -// TestCassandraClusterCreation tests the creation of a Cassandra cluster. -func (s *CassandraSuite) TestCassandraClusterCreation() { - s.CheckClusterHealth() -} - -// TestScyllaClusterCreation tests the creation of a Scylla cluster. -// func (s *CassandraSuite) TestScyllaClusterCreation() { -// s.CheckClusterHealth() -// } - -////////////////////// -// Helper Functions // -////////////////////// - -// Teardown gathers logs and other helping info and then uninstalls -// everything installed by the CassandraSuite -func (s *CassandraSuite) Teardown() { - s.installer.GatherAllCassandraLogs(s.systemNamespace, s.namespace, s.T().Name()) - s.installer.UninstallCassandra(s.systemNamespace, s.namespace) -} - -// CheckClusterHealth checks if all Pods in the cluster are ready -// and CQL is working. -func (s *CassandraSuite) CheckClusterHealth() { - // Verify that cassandra-operator is running - operatorName := "rook-cassandra-operator" - logger.Infof("Verifying that all expected pods of cassandra operator are ready") - ready := utils.Retry(10, 30*time.Second, - "Waiting for Cassandra operator to be ready", func() bool { - sts, err := s.k8sHelper.Clientset.AppsV1().StatefulSets(s.systemNamespace).Get(context.TODO(), operatorName, v1.GetOptions{}) - if err != nil { - logger.Errorf("Error getting Cassandra operator `%s`", operatorName) - return false - } - if sts.Generation != sts.Status.ObservedGeneration { - logger.Infof("Operator Statefulset has not converged yet") - return false - } - if sts.Status.UpdatedReplicas != *sts.Spec.Replicas { - logger.Error("Operator StatefulSet is rolling updating") - return false - } - if sts.Status.ReadyReplicas != *sts.Spec.Replicas { - logger.Infof("Statefulset not ready. 
Got: %v, Want: %v", - sts.Status.ReadyReplicas, sts.Spec.Replicas) - return false - } - return true - }) - assert.True(s.T(), ready, "Timed out waiting for Cassandra operator to become ready") - - // Verify cassandra cluster instances are running OK - clusterName := "cassandra-ns" - clusterNamespace := "cassandra-ns" - ready = utils.Retry(10, 30*time.Second, - "Waiting for Cassandra cluster to be ready", func() bool { - c, err := s.k8sHelper.RookClientset.CassandraV1alpha1().Clusters(clusterNamespace).Get(context.TODO(), clusterName, v1.GetOptions{}) - if err != nil { - logger.Errorf("Error getting Cassandra cluster `%s`", clusterName) - return false - } - for rackName, rack := range c.Status.Racks { - var desiredMembers int32 - for _, r := range c.Spec.Datacenter.Racks { - if r.Name == rackName { - desiredMembers = r.Members - break - } - } - if !(desiredMembers == rack.Members && rack.Members == rack.ReadyMembers) { - logger.Infof("Rack `%s` is not ready yet", rackName) - return false - } - } - return true - }) - assert.True(s.T(), ready, "Timed out waiting for Cassandra cluster to become ready") - - // Determine a pod name for the cluster - podName := "cassandra-ns-us-east-1-us-east-1a-0" - - // Get the Pod's IP address - command := "hostname" - commandArgs := []string{"-i"} - podIP, err := s.k8sHelper.Exec(s.namespace, podName, command, commandArgs) - assert.NoError(s.T(), err) - - command = "cqlsh" - commandArgs = []string{ - "-e", - ` -CREATE KEYSPACE IF NOT EXISTS test WITH REPLICATION = { -'class': 'SimpleStrategy', -'replication_factor': 1 -}; -USE test; -CREATE TABLE IF NOT EXISTS map (key text, value text, PRIMARY KEY(key)); -INSERT INTO map (key, value) VALUES('test_key', 'test_value'); -SELECT key,value FROM map WHERE key='test_key';`, - podIP, - } - - time.Sleep(30 * time.Second) - var result string - for i := 0; i < utils.RetryLoop; i++ { - result, err = s.k8sHelper.Exec(s.namespace, podName, command, commandArgs) - logger.Infof("cassandra cql command exited, err: %v. 
result: %s", err, result) - if err == nil { - break - } - logger.Warning("cassandra cql command failed, will try again") - time.Sleep(utils.RetryInterval * time.Second) - } - - assert.NoError(s.T(), err) - assert.True(s.T(), strings.Contains(result, "test_key")) - assert.True(s.T(), strings.Contains(result, "test_value")) -} diff --git a/tests/manifests/test-kms-vault-spec.yaml b/tests/manifests/test-kms-vault-spec.yaml index 1fde7755406b..6848fe48d69b 100644 --- a/tests/manifests/test-kms-vault-spec.yaml +++ b/tests/manifests/test-kms-vault-spec.yaml @@ -6,6 +6,8 @@ spec: VAULT_ADDR: https://vault.default.svc.cluster.local:8200 VAULT_BACKEND_PATH: rook/ver1 VAULT_SECRET_ENGINE: kv - VAULT_BACKEND: v1 VAULT_SKIP_VERIFY: "true" + VAULT_CLIENT_KEY: "vault-client-key" + VAULT_CLIENT_CERT: "vault-client-cert" + VAULT_CACERT: "vault-ca-cert" tokenSecretName: rook-vault-token diff --git a/tests/scripts/auto-grow-storage.sh b/tests/scripts/auto-grow-storage.sh new file mode 100755 index 000000000000..15a6a264970b --- /dev/null +++ b/tests/scripts/auto-grow-storage.sh @@ -0,0 +1,263 @@ +#!/usr/bin/env bash + +############# +# FUNCTIONS # +############# + +function calculateSize() { + local currentsize=$2 + local unit=$1 + rawsizeValue=0 # rawsizeValue is a global variable + + if [[ "$currentsize" == *"Mi" ]] + then + rawSize=${currentsize//Mi} # rawSize is a global variable + unitSize="Mi" + rawsizeValue=$rawSize + elif [[ "$currentsize" == *"Gi" ]] + then + rawSize=${currentsize//Gi} + unitSize="Gi" + rawsizeValue=$(( rawSize * 1000 )) + elif [[ "$currentsize" == *"Ti" ]] + then + rawSize=${currentsize//Ti} + unitSize="Ti" + rawsizeValue=$(( rawSize * 1000000 )) + else + echo "Unknown unit of $unit : ${currentsize}" + echo "Supported units are 'Mi','Gi','Ti'" + exit 1 + fi +} + +function compareSizes() { + local newsize=$1 + local maxsize=$2 + calculateSize newsize "${newsize}" # rawsizeValue is calculated and used for further process + local newsize=$rawsizeValue + calculateSize maxsize "${maxsize}" + local maxsize=$rawsizeValue + if [ "${newsize}" -ge "${maxsize}" ] + then + return "1" + fi + return "0" +} + +function growVertically() { + local growRate=$1 + local pvc=$2 + local ns=$3 + local maxSize=$4 + local currentSize + currentSize=$(kubectl get pvc "${pvc}" -n "${ns}" -o json | jq -r '.spec.resources.requests.storage') + echo "PVC(OSD) current size is ${currentSize} and will be increased by ${growRate}%." + + calculateSize "${pvc}" "${currentSize}" # rawSize is calculated and used for further process + + if ! [[ "${rawSize}" =~ ^[0-9]+$ ]] + then + echo "disk size should be an integer" + else + newSize=$(echo "${rawSize}+(${rawSize} * ${growRate})/100" | bc | cut -f1 -d'.') + if [ "${newSize}" = "${rawSize}" ] + then + newSize=$(( rawSize + 1 )) + echo "New adjusted calculated size for the PVC is ${newSize}${unitSize}" + else + echo "New calculated size for the PVC is ${newSize}${unitSize}" + fi + + compareSizes ${newSize}${unitSize} "${maxSize}" + if [ "1" = $? 
] + then + newSize=${maxSize} + echo "Disk has reached its MAX capacity ${maxSize}, add a new disk to it" + result=$(kubectl patch pvc "${pvc}" -n "${ns}" --type json --patch "[{ op: replace, path: /spec/resources/requests/storage, value: ${newSize} }]") + else + result=$(kubectl patch pvc "${pvc}" -n "${ns}" --type json --patch "[{ op: replace, path: /spec/resources/requests/storage, value: ${newSize}${unitSize} }]") + fi + echo "${result}" + fi +} + +function growHorizontally() { + local increaseOSDCount=$1 + local pvc=$2 + local ns=$3 + local maxOSDCount=$4 + local deviceSetName + local cluster="" + local deviceSet="" + local currentOSDCount=0 + local clusterCount=0 + local deviceSetCount=0 + deviceSetName=$(kubectl get pvc "${pvc}" -n "${ns}" -o json | jq -r '.metadata.labels."ceph.rook.io/DeviceSet"') + while [ "$cluster" != "null" ] + do + cluster=$(kubectl get CephCluster -n "${ns}" -o json | jq -r ".items[${clusterCount}]") + while [ "$deviceSet" != "null" ] + do + deviceSet=$(kubectl get CephCluster -n "${ns}" -o json | jq -r ".items[${clusterCount}].spec.storage.storageClassDeviceSets[${deviceSetCount}].name") + if [[ $deviceSet == "${deviceSetName}" ]] + then + currentOSDCount=$(kubectl get CephCluster -n "${ns}" -o json | jq -r ".items[${clusterCount}].spec.storage.storageClassDeviceSets[${deviceSetCount}].count") + finalCount=$(( "${currentOSDCount}" + "${increaseOSDCount}" )) + echo "OSD count: ${currentOSDCount}. OSD count will be increased by ${increaseOSDCount}." + if [ "${finalCount}" -ge "${maxOSDCount}" ] + then + finalCount=${maxOSDCount} + echo "DeviceSet ${deviceSet} capacity is full, cannot add more OSD to it" + fi + echo "Total count of OSDs for deviceset ${deviceSetName} is set to ${finalCount}." + clusterName=$(kubectl get CephCluster -n "${ns}" -o json | jq -r ".items[${clusterCount}].metadata.name" ) + result=$(kubectl patch CephCluster "${clusterName}" -n "${ns}" --type json --patch "[{ op: replace, path: /spec/storage/storageClassDeviceSets/${deviceSetCount}/count, value: ${finalCount} }]") + echo "${result}" + break + fi + deviceSetCount=$((deviceSetCount+1)) + deviceSet=$(kubectl get CephCluster -n "${ns}" -o json | jq -r ".items[${clusterCount}].spec.storage.storageClassDeviceSets[${deviceSetCount}].name") + done + clusterCount=$((clusterCount+1)) + cluster=$(kubectl get CephCluster -n "${ns}" -o json | jq -r ".items[${clusterCount}]") + done +} + +function growOSD(){ + itr=0 + alertmanagerroute=$(kubectl -n rook-ceph -o jsonpath="{.status.hostIP}" get pod prometheus-rook-prometheus-0) + route=${alertmanagerroute}:30900 + toolbox=$(kubectl get pods -n rook-ceph | grep -i rook-ceph-tools | awk '{ print $1 }') + alerts=$(kubectl exec -it "${toolbox}" -n rook-ceph -- bash -c "curl -s http://${route}/api/v1/alerts") + export total_alerts + total_alerts=$( jq '.data.alerts | length' <<< "${alerts}") + echo "Looping at $(date +"%Y-%m-%d %H:%M:%S")" + + while true + do + if [ "${total_alerts}" == "" ] + then + echo "Alert manager not configured, re-run the script" + exit 1 + fi + export entry + entry=$( jq ".data.alerts[$itr]" <<< "${alerts}") + thename=$(echo "${entry}" | jq -r '.labels.alertname') + if [ "${thename}" = "CephOSDNearFull" ] || [ "${thename}" = "CephOSDCriticallyFull" ] + then + echo "${entry}" + ns=$(echo "${entry}" | jq -r '.labels.namespace') + osdID=$(echo "${entry}" | jq -r '.labels.ceph_daemon') + osdID=${osdID/./-} + pvc=$(kubectl get deployment -n "${ns}" rook-ceph-"${osdID}" -o json | jq -r '.metadata.labels."ceph.rook.io/pvc"') + if [[ 
$pvc == null ]] + then + echo "PVC not found, script can only run on PVC-based cluster" + exit 1 + fi + echo "Processing NearFull or Full alert for PVC ${pvc} in namespace ${ns}" + if [[ $1 == "count" ]] + then + growHorizontally "$2" "${pvc}" "${ns}" "$3" + else + growVertically "$2" "${pvc}" "${ns}" "$3" + fi + fi + (( itr = itr + 1 )) + if [[ "${itr}" == "${total_alerts}" ]] || [[ "${total_alerts}" == "0" ]] + then + sleep 600 + alerts=$(kubectl exec -it "${toolbox}" -n rook-ceph -- bash -c "curl -s http://${route}/api/v1/alerts") + total_alerts=$( jq '.data.alerts | length' <<< "${alerts}") + itr=0 + echo "Looping at $(date +"%Y-%m-%d %H:%M:%S")" + fi + done +} + +function creatingPrerequisites(){ + echo "creating Prerequisites deployments - Prometheus Operator and Prometheus Instances" + # creating Prometheus operator + kubectl apply -f https://raw.githubusercontent.com/coreos/prometheus-operator/v0.40.0/bundle.yaml + # waiting for Prometheus operator to get ready + timeout 30 sh -c "until [ $(kubectl get pod -l app.kubernetes.'io/name'=prometheus-operator -o json | jq -r '.items[0].status.phase') = Running ]; do echo 'waiting for prometheus-operator to get created' && sleep 1; done" + # creating a service monitor that will watch the Rook cluster and collect metrics regularly + kubectl create -f https://raw.githubusercontent.com/rook/rook/master/cluster/examples/kubernetes/ceph/monitoring/service-monitor.yaml + # create the PrometheusRule for Rook alerts. + kubectl create -f https://raw.githubusercontent.com/rook/rook/master/cluster/examples/kubernetes/ceph/monitoring/prometheus-ceph-v14-rules.yaml + # create prometheus-rook-prometheus-0 pod + kubectl create -f https://raw.githubusercontent.com/rook/rook/master/cluster/examples/kubernetes/ceph/monitoring/prometheus.yaml + # create prometheus-service + kubectl create -f https://raw.githubusercontent.com/rook/rook/master/cluster/examples/kubernetes/ceph/monitoring/prometheus-service.yaml + # waiting for prometheus-rook-prometheus-0 pod to get ready + timeout 60 sh -c "until [ $(kubectl get pod -l prometheus=rook-prometheus -nrook-ceph -o json | jq -r '.items[0].status.phase') = Running ]; do echo 'waiting for prometheus-rook-prometheus-0 pod to get created' && sleep 1; done" + if [ "$(kubectl get pod -l prometheus=rook-prometheus -nrook-ceph)" == "" ] + then + echo "prometheus-rook-prometheus-0 pod not created, re-run the script" + exit 1 + fi + echo "Prerequisites deployments created" +} + +function invalidCall(){ + echo " $0 [command] +Available Commands for normal cluster: + ./auto-grow-storage.sh count --max maxCount --count rate Scale horizontally by adding more OSDs to the cluster + ./auto-grow-storage.sh size --max maxSize --growth-rate percent Scale vertically by increasing the size of existing OSDs +" >&2 +} + +case "${1:-}" in +count) + if [[ $# -ne 5 ]]; then + echo "incorrect command to run the script" + invalidCall + exit 1 + fi + max=$3 + count=$5 + if ! [[ "${max}" =~ ^[0-9]+$ ]] + then + echo "maxCount should be an integer" + invalidCall + exit 1 + fi + if ! 
[[ "${count}" =~ ^[0-9]+$ ]] + then + echo "rate should be an integer" + invalidCall + exit 1 + fi + creatingPrerequisites + echo "Adding on nearfull and full alert and number of OSD to add is ${count}" + growOSD count "${count}" "${max}" + ;; +size) + if [[ $# -ne 5 ]]; then + echo "incorrect command to run the script" + invalidCall + exit 1 + fi + max=$3 + growRate=$5 + if [[ "${max}" =~ ^[0-9]+$ ]] + then + echo "maxSize should be an string" + invalidCall + exit 1 + fi + if ! [[ "${growRate}" =~ ^[0-9]+$ ]] + then + echo "growth-rate should be an integer" + invalidCall + exit 1 + fi + creatingPrerequisites + echo "Resizing on nearfull and full alert and Expansion percentage set to ${growRate}%" + growOSD size "${growRate}" "${max}" + ;; +*) + invalidCall + ;; +esac diff --git a/tests/scripts/build-release.sh b/tests/scripts/build-release.sh index a773877846b4..e8553e8ce168 100755 --- a/tests/scripts/build-release.sh +++ b/tests/scripts/build-release.sh @@ -5,42 +5,57 @@ set -ex # FUNCTIONS # ############# - -if [[ ${GITHUB_REF} =~ master ]]; then - CHANNEL=master -else - CHANNEL=release -fi - function build() { - # set VERSION to a dummy value since Jenkins normally sets it for us. Do this to make Helm happy and not fail with "Error: Invalid Semantic Version" - build/run make VERSION=0 build.all + build/run make build.all # quick check that go modules are tidied build/run make mod.check } -function publish_and_promote() { +function publish() { build - build/run make -C build/release build BRANCH_NAME=${BRANCH_NAME} GIT_API_TOKEN=${GIT_API_TOKEN} + build/run make -C build/release build BRANCH_NAME=${BRANCH_NAME} TAG_WITH_SUFFIX=${TAG_WITH_SUFFIX} GIT_API_TOKEN=${GIT_API_TOKEN} git status & git diff & - build/run make -C build/release publish BRANCH_NAME=${BRANCH_NAME} AWS_ACCESS_KEY_ID=${AWS_USR} AWS_SECRET_ACCESS_KEY=${AWS_PSW} GIT_API_TOKEN=${GIT_API_TOKEN} - # automatically promote the master builds - build/run make -C build/release promote BRANCH_NAME=${BRANCH_NAME} CHANNEL=${CHANNEL} AWS_ACCESS_KEY_ID=${AWS_USR} AWS_SECRET_ACCESS_KEY=${AWS_PSW} - + build/run make -C build/release publish BRANCH_NAME=${BRANCH_NAME} TAG_WITH_SUFFIX=${TAG_WITH_SUFFIX} AWS_ACCESS_KEY_ID=${AWS_USR} AWS_SECRET_ACCESS_KEY=${AWS_PSW} GIT_API_TOKEN=${GIT_API_TOKEN} } -function publish() { - build - build/run make -C build/release build BRANCH_NAME=${BRANCH_NAME} TAG_WITH_SUFFIX=true GIT_API_TOKEN=${GIT_API_TOKEN} - git status & - git diff & - build/run make -C build/release publish BRANCH_NAME=${BRANCH_NAME} TAG_WITH_SUFFIX=true AWS_ACCESS_KEY_ID=${AWS_USR} AWS_SECRET_ACCESS_KEY=${AWS_PSW} GIT_API_TOKEN=${GIT_API_TOKEN} +function promote() { + # automatically promote the master builds + echo "Promoting from branch ${BRANCH_NAME}" + build/run make -C build/release promote BRANCH_NAME=${BRANCH_NAME} TAG_WITH_SUFFIX=${TAG_WITH_SUFFIX} CHANNEL=${CHANNEL} AWS_ACCESS_KEY_ID=${AWS_USR} AWS_SECRET_ACCESS_KEY=${AWS_PSW} } -selected_function="$1" -if [ "$selected_function" = "publish_and_promote" ]; then - publish_and_promote -elif [ "$selected_function" = "publish" ]; then - publish +############# +# MAIN # +############# + +SHOULD_PROMOTE=true +if [[ ${GITHUB_REF} =~ master ]]; then + echo "Publishing from master" + CHANNEL=master +else + echo "Tagging with suffix for release and tagged builds" + TAG_WITH_SUFFIX=true + CHANNEL=release + + # If a tag, find the source release branch + if [[ $BRANCH_NAME = v* ]]; then + TAG_NAME=${BRANCH_NAME} + BRANCH_NAME=$(git branch -r --contain refs/tags/${BRANCH_NAME} | grep 
"origin/release-." | sed 's/origin\///' | xargs) + if [[ $BRANCH_NAME = "" ]]; then + echo "Branch name not found in tag $TAG_NAME" + exit 1 + fi + echo "Publishing tag ${TAG_NAME} in branch ${BRANCH_NAME}" + else + echo "Publishing from release branch ${BRANCH_NAME}" + SHOULD_PROMOTE=false + fi +fi + + +publish + +if [[ "$SHOULD_PROMOTE" = true ]]; then + promote fi diff --git a/tests/scripts/collect-logs.sh b/tests/scripts/collect-logs.sh new file mode 100755 index 000000000000..a765119075a9 --- /dev/null +++ b/tests/scripts/collect-logs.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash + +set -x + +# User parameters +: "${CLUSTER_NAMESPACE:="rook-ceph"}" +: "${OPERATOR_NAMESPACE:="$CLUSTER_NAMESPACE"}" +: "${LOG_DIR:="test"}" + +LOG_DIR="${LOG_DIR%/}" # remove trailing slash if necessary +mkdir -p "${LOG_DIR}" + +CEPH_CMD="kubectl -n ${CLUSTER_NAMESPACE} exec deploy/rook-ceph-tools -- ceph --connect-timeout 3" + +$CEPH_CMD -s > "${LOG_DIR}"/ceph-status.txt +$CEPH_CMD osd dump > "${LOG_DIR}"/ceph-osd-dump.txt +$CEPH_CMD report > "${LOG_DIR}"/ceph-report.txt + +kubectl -n "${OPERATOR_NAMESPACE}" logs deploy/rook-ceph-operator > "${LOG_DIR}"/operator-logs.txt +kubectl -n "${OPERATOR_NAMESPACE}" get pods -o wide > "${LOG_DIR}"/operator-pods-list.txt +kubectl -n "${CLUSTER_NAMESPACE}" get pods -o wide > "${LOG_DIR}"/cluster-pods-list.txt +prepare_job="$(kubectl -n "${CLUSTER_NAMESPACE}" get job -l app=rook-ceph-osd-prepare --output name | awk 'FNR <= 1')" # outputs job/ +kubectl -n "${CLUSTER_NAMESPACE}" describe "${prepare_job}" > "${LOG_DIR}"/osd-prepare-describe.txt +kubectl -n "${CLUSTER_NAMESPACE}" logs "${prepare_job}" > "${LOG_DIR}"/osd-prepare-logs.txt +kubectl -n "${CLUSTER_NAMESPACE}" describe deploy/rook-ceph-osd-0 > "${LOG_DIR}"/rook-ceph-osd-0-describe.txt +kubectl -n "${CLUSTER_NAMESPACE}" describe deploy/rook-ceph-osd-1 > "${LOG_DIR}"/rook-ceph-osd-1-describe.txt +kubectl -n "${CLUSTER_NAMESPACE}" logs deploy/rook-ceph-osd-0 --all-containers > "${LOG_DIR}"/rook-ceph-osd-0-logs.txt +kubectl -n "${CLUSTER_NAMESPACE}" logs deploy/rook-ceph-osd-1 --all-containers > "${LOG_DIR}"/rook-ceph-osd-1-logs.txt +kubectl get all -n "${OPERATOR_NAMESPACE}" -o wide > "${LOG_DIR}"/operator-wide.txt +kubectl get all -n "${OPERATOR_NAMESPACE}" -o wide > "${LOG_DIR}"/operator-yaml.txt +kubectl get all -n "${CLUSTER_NAMESPACE}" -o wide > "${LOG_DIR}"/cluster-wide.txt +kubectl get all -n "${CLUSTER_NAMESPACE}" -o yaml > "${LOG_DIR}"/cluster-yaml.txt +kubectl -n "${CLUSTER_NAMESPACE}" get cephcluster -o yaml > "${LOG_DIR}"/cephcluster.txt +sudo lsblk | sudo tee -a "${LOG_DIR}"/lsblk.txt +journalctl -o short-precise --dmesg > "${LOG_DIR}"/dmesg.txt +journalctl > "${LOG_DIR}"/journalctl.txt diff --git a/tests/scripts/create-bluestore-partitions.sh b/tests/scripts/create-bluestore-partitions.sh index c0d25517e02b..0c5f0891e44f 100755 --- a/tests/scripts/create-bluestore-partitions.sh +++ b/tests/scripts/create-bluestore-partitions.sh @@ -56,6 +56,7 @@ function create_block_partition { for osd in $(seq 1 "$osd_count"); do echo "$osd" create_partition osd-"$osd" + echo "SUBSYSTEM==\"block\", ATTR{size}==\"12582912\", ATTR{partition}==\"$osd\", ACTION==\"add\", RUN+=\"/bin/chown 167:167 ${DISK}${osd}\"" | sudo tee -a /etc/udev/rules.d/01-rook-"$osd".rules done fi } @@ -120,4 +121,4 @@ sudo udevadm settle # Print drives sudo lsblk -sudo parted "$DISK" -s print \ No newline at end of file +sudo parted "$DISK" -s print diff --git a/tests/scripts/deploy-validate-vault.sh b/tests/scripts/deploy-validate-vault.sh index 
4c769c189c22..fc3809652192 100755 --- a/tests/scripts/deploy-validate-vault.sh +++ b/tests/scripts/deploy-validate-vault.sh @@ -26,13 +26,13 @@ function install_helm { } if [[ "$(uname)" == "Linux" ]]; then - sudo apt-get install jq -y - install_helm + sudo apt-get install jq -y + install_helm fi function generate_vault_tls_config { openssl genrsa -out "${TMPDIR}"/vault.key 2048 - + cat <<EOF >"${TMPDIR}"/csr.conf [req] req_extensions = v3_req @@ -50,11 +50,11 @@ DNS.3 = ${SERVICE}.${NAMESPACE}.svc DNS.4 = ${SERVICE}.${NAMESPACE}.svc.cluster.local IP.1 = 127.0.0.1 EOF - + openssl req -new -key "${TMPDIR}"/vault.key -subj "/CN=${SERVICE}.${NAMESPACE}.svc" -out "${TMPDIR}"/server.csr -config "${TMPDIR}"/csr.conf - + export CSR_NAME=vault-csr - + cat <<EOF >"${TMPDIR}/"csr.yaml apiVersion: certificates.k8s.io/v1beta1 kind: CertificateSigningRequest @@ -69,20 +69,20 @@ spec: - key encipherment - server auth EOF - + kubectl create -f "${TMPDIR}/"csr.yaml - + kubectl certificate approve ${CSR_NAME} - + serverCert=$(kubectl get csr ${CSR_NAME} -o jsonpath='{.status.certificate}') echo "${serverCert}" | openssl base64 -d -A -out "${TMPDIR}"/vault.crt kubectl config view --raw --minify --flatten -o jsonpath='{.clusters[].cluster.certificate-authority-data}' | base64 -d > "${TMPDIR}"/vault.ca kubectl create secret generic ${SECRET_NAME} \ - --namespace ${NAMESPACE} \ - --from-file=vault.key="${TMPDIR}"/vault.key \ - --from-file=vault.crt="${TMPDIR}"/vault.crt \ - --from-file=vault.ca="${TMPDIR}"/vault.ca - + --namespace ${NAMESPACE} \ + --from-file=vault.key="${TMPDIR}"/vault.key \ + --from-file=vault.crt="${TMPDIR}"/vault.crt \ + --from-file=vault.ca="${TMPDIR}"/vault.ca + # for rook kubectl create secret generic vault-ca-cert --namespace ${ROOK_NAMESPACE} --from-file=cert="${TMPDIR}"/vault.ca kubectl create secret generic vault-client-cert --namespace ${ROOK_NAMESPACE} --from-file=cert="${TMPDIR}"/vault.crt @@ -90,7 +90,7 @@ EOF } function vault_helm_tls { - + cat <<EOF >"${TMPDIR}/"custom-values.yaml global: enabled: true @@ -119,30 +119,30 @@ server: path = "/vault/data" } EOF - + } function deploy_vault { # TLS config generate_vault_tls_config vault_helm_tls - + # Install Vault with Helm helm repo add hashicorp https://helm.releases.hashicorp.com helm install vault hashicorp/vault --values "${TMPDIR}/"custom-values.yaml timeout 120 sh -c 'until kubectl get pods -l app.kubernetes.io/name=vault --field-selector=status.phase=Running|grep vault-0; do sleep 5; done' - + # Unseal Vault VAULT_INIT_TEMP_DIR=$(mktemp) kubectl exec -ti vault-0 -- vault operator init -format "json" -ca-cert /vault/userconfig/vault-server-tls/vault.crt | tee -a "$VAULT_INIT_TEMP_DIR" for i in $(seq 0 2); do - kubectl exec -ti vault-0 -- vault operator unseal -ca-cert /vault/userconfig/vault-server-tls/vault.crt "$(jq -r ".unseal_keys_b64[$i]" "$VAULT_INIT_TEMP_DIR")" + kubectl exec -ti vault-0 -- vault operator unseal -ca-cert /vault/userconfig/vault-server-tls/vault.crt "$(jq -r ".unseal_keys_b64[$i]" "$VAULT_INIT_TEMP_DIR")" done kubectl get pods -l app.kubernetes.io/name=vault - + # Wait for vault to be ready once unsealed while [[ $(kubectl get pods -l app.kubernetes.io/name=vault -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}') != "True" ]]; do echo "waiting vault to be ready" && sleep 1; done - + # Configure Vault ROOT_TOKEN=$(jq -r '.root_token' "$VAULT_INIT_TEMP_DIR") kubectl exec -it vault-0 -- vault login -ca-cert /vault/userconfig/vault-server-tls/vault.crt "$ROOT_TOKEN" @@ -151,7 +151,7 @@ 
kubectl exec -ti vault-0 -- vault secrets enable -ca-cert /vault/userconfig/vault-server-tls/vault.crt -path=rook/ver2 kv-v2 kubectl exec -ti vault-0 -- vault kv list -ca-cert /vault/userconfig/vault-server-tls/vault.crt rook/ver1 || true # failure is expected kubectl exec -ti vault-0 -- vault kv list -ca-cert /vault/userconfig/vault-server-tls/vault.crt rook/ver2 || true # failure is expected - + # Configure Vault Policy for Rook echo ' path "rook/*" { @@ -160,12 +160,12 @@ function deploy_vault { path "sys/mounts" { capabilities = ["read"] }'| kubectl exec -i vault-0 -- vault policy write -ca-cert /vault/userconfig/vault-server-tls/vault.crt rook - - + # Create a token for Rook ROOK_TOKEN=$(kubectl exec vault-0 -- vault token create -policy=rook -format json -ca-cert /vault/userconfig/vault-server-tls/vault.crt|jq -r '.auth.client_token'|base64) - + # Configure cluster - sed -i "s|ROOK_TOKEN|$ROOK_TOKEN|" tests/manifests/test-kms-vault.yaml + sed -i "s|ROOK_TOKEN|${ROOK_TOKEN//[$'\t\r\n']}|" tests/manifests/test-kms-vault.yaml } function validate_rgw_token { @@ -176,7 +176,7 @@ function validate_rgw_token { RGW_TOKEN_FILE=$(kubectl -n rook-ceph describe pods "$RGW_POD" | grep "rgw-crypt-vault-token-file" | cut -f2- -d=) VAULT_PATH_PREFIX=$(kubectl -n rook-ceph describe pods "$RGW_POD" | grep "rgw-crypt-vault-prefix" | cut -f2- -d=) VAULT_TOKEN=$(kubectl -n rook-ceph exec $RGW_POD -- cat $RGW_TOKEN_FILE) - + #fetch key from vault server using token from RGW pod, P.S using -k for curl since custom ssl certs not yet to support in RGW FETCHED_KEY=$(kubectl -n rook-ceph exec $RGW_POD -- curl -k -X GET -H "X-Vault-Token:$VAULT_TOKEN" "$VAULT_SERVER""$VAULT_PATH_PREFIX"/"$RGW_BUCKET_KEY"|jq -r .data.data.key) if [[ "$ENCRYPTION_KEY" != "$FETCHED_KEY" ]]; then @@ -196,7 +196,7 @@ function validate_rgw_deployment { function validate_osd_secret { NB_OSD_PVC=$(kubectl -n rook-ceph get pvc|grep -c set1) NB_VAULT_SECRET=$(kubectl -n default exec -ti vault-0 -- vault kv list -ca-cert /vault/userconfig/vault-server-tls/vault.crt rook/ver1|grep -c set1) - + if [ "$NB_OSD_PVC" -ne "$NB_VAULT_SECRET" ]; then echo "number of osd pvc is $NB_OSD_PVC and number of vault secret is $NB_VAULT_SECRET, mismatch" exit 1 @@ -210,14 +210,14 @@ function validate_osd_secret { case "$ACTION" in deploy) deploy_vault - ;; + ;; validate_osd) validate_osd_deployment - ;; + ;; validate_rgw) validate_rgw_deployment ;; *) echo "invalid action $ACTION" >&2 exit 1 - esac +esac diff --git a/tests/scripts/gen_release_notes.sh b/tests/scripts/gen_release_notes.sh new file mode 100755 index 000000000000..061435cae39a --- /dev/null +++ b/tests/scripts/gen_release_notes.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash +set -e + +function help() { + print=" + To run this command, + 1. verify you are selecting right branch from GitHub UI dropdown menu + 2. 
enter the tag you want to create + " + echo "$print" + exit 1 +} + +if [ -z "${GITHUB_USER}" ] || [ -z "${GITHUB_TOKEN}" ]; then + echo "requires both GITHUB_USER and GITHUB_TOKEN to be set as env variables" + help +fi + +pr_list=$(git log --pretty="%s" --merges --left-only "${FROM_BRANCH}"..."${TO_TAG}" | grep pull | awk '/Merge pull request/ {print $4}' | cut -c 2-) + +# for release notes +function release_notes() { + for pr in $pr_list; do + # get PR title + backport_pr=$(curl -s -u "${GITHUB_USER}":"${GITHUB_TOKEN}" "https://api.github.com/repos/rook/rook/pulls/${pr}" | jq '.title') + # with upstream/release-1.6 v1.6.8, it was giving extra PR numbers, so removing after PR for changing tag is merged. + if [[ "$backport_pr" =~ ./*"build: Update build version to $TO_TAG"* ]]; then + break + fi + # check if it is a manual backport PR or not, for mergify backport PR it will contain "(backport" + if [[ "$backport_pr" =~ .*"(backport".* ]]; then + # find the PR number after the # + original_pr=$(echo "$backport_pr" | sed -n -e 's/^.*#//p' | grep -Eo '[0-9]' | tr -d '\n') + else + # in manual backport PR, we'll directly fetch the owner and title from the PR number + original_pr=$pr + fi + # get the PR title and PR owner in required format + title_with_user=$(curl -s -u "${GITHUB_USER}":"${GITHUB_TOKEN}" "https://api.github.com/repos/rook/rook/pulls/${original_pr}" | jq '.title+ " (#, @"+.user.login+")"') + # add PR number after "#" + result=$(echo "$title_with_user" | sed "s/(#/(#$original_pr/" |tail -c +2) + # remove last `"` + result=${result%\"} + echo "$result" + done +} + +release_notes diff --git a/tests/scripts/github-action-helper.sh b/tests/scripts/github-action-helper.sh index d86996b34292..3b76261a5112 100755 --- a/tests/scripts/github-action-helper.sh +++ b/tests/scripts/github-action-helper.sh @@ -23,6 +23,7 @@ set -xe NETWORK_ERROR="connection reset by peer" SERVICE_UNAVAILABLE_ERROR="Service Unavailable" INTERNAL_ERROR="INTERNAL_ERROR" +INTERNAL_SERVER_ERROR="500 Internal Server Error" ############# # FUNCTIONS # @@ -56,16 +57,35 @@ function use_local_disk() { } function use_local_disk_for_integration_test() { + sudo udevadm control --log-priority=debug sudo swapoff --all --verbose sudo umount /mnt + sudo sed -i.bak '/\/mnt/d' /etc/fstab # search for the device since it keeps changing between sda and sdb PARTITION="${BLOCK}1" sudo wipefs --all --force "$PARTITION" - sudo lsblk + sudo dd if=/dev/zero of="${PARTITION}" bs=1M count=1 + sudo lsblk --bytes # add a udev rule to force the disk partitions to ceph # we have observed that some runners keep detaching/re-attaching the additional disk overriding the permissions to the default root:disk # for more details see: https://github.com/rook/rook/issues/7405 echo "SUBSYSTEM==\"block\", ATTR{size}==\"29356032\", ACTION==\"add\", RUN+=\"/bin/chown 167:167 $PARTITION\"" | sudo tee -a /etc/udev/rules.d/01-rook.rules + # for below, see: https://access.redhat.com/solutions/1465913 + block_base="$(basename "${BLOCK}")" + echo "ACTION==\"add|change\", KERNEL==\"${block_base}\", OPTIONS:=\"nowatch\"" | sudo tee -a /etc/udev/rules.d/99-z-rook-nowatch.rules + # The partition is still getting reloaded occasionally during operation. See https://github.com/rook/rook/issues/8975 + # Try issuing some disk-inspection commands to jog the system so it won't reload the partitions + # during OSD provisioning. 
+ sudo udevadm control --reload-rules || true + sudo udevadm trigger || true + time sudo udevadm settle || true + sudo partprobe || true + sudo lsblk --noheadings --pairs "${BLOCK}" || true + sudo sgdisk --print "${BLOCK}" || true + sudo udevadm info --query=property "${BLOCK}" || true + sudo lsblk --noheadings --pairs "${PARTITION}" || true + journalctl -o short-precise --dmesg | tail -40 || true + cat /etc/fstab || true } function create_partitions_for_osds() { @@ -88,15 +108,22 @@ function create_bluestore_partitions_and_pvcs_for_wal(){ tests/scripts/localPathPV.sh "$BLOCK_PART" "$DB_PART" "$WAL_PART" } +function collect_udev_logs_in_background() { + local log_dir="${1:-"/home/runner/work/rook/rook/tests/integration/_output/tests"}" + mkdir -p "${log_dir}" + udevadm monitor --property &> "${log_dir}"/udev-monitor-property.txt & + udevadm monitor --kernel &> "${log_dir}"/udev-monitor-kernel.txt & + udevadm monitor --udev &> "${log_dir}"/udev-monitor-udev.txt & +} + function build_rook() { build_type=build if [ -n "$1" ]; then build_type=$1 fi GOPATH=$(go env GOPATH) make clean - # set VERSION to a dummy value since Jenkins normally sets it for us. Do this to make Helm happy and not fail with "Error: Invalid Semantic Version" for _ in $(seq 1 3); do - if ! o=$(make -j"$(nproc)" IMAGES='ceph' VERSION=0 "$build_type"); then + if ! o=$(make -j"$(nproc)" IMAGES='ceph' "$build_type"); then case "$o" in *"$NETWORK_ERROR"*) echo "network failure occurred, retrying..." @@ -110,17 +137,23 @@ function build_rook() { echo "network failure occurred, retrying..." continue ;; + *"$INTERNAL_SERVER_ERROR"*) + echo "network failure occurred, retrying..." + continue + ;; *) # valid failure exit 1 esac + # no errors so we break the loop after the first iteration + break fi done # validate build tests/scripts/validate_modified_files.sh build docker images if [[ "$build_type" == "build" ]]; then - docker tag $(docker images | awk '/build-/ {print $1}') rook/ceph:master + docker tag $(docker images | awk '/build-/ {print $1}') rook/ceph:local-build fi } @@ -130,9 +163,25 @@ function build_rook_all() { function validate_yaml() { cd cluster/examples/kubernetes/ceph + + # create the Rook CRDs and other resources kubectl create -f crds.yaml -f common.yaml + + # create the volume replication CRDs + replication_version=v0.1.0 + replication_url="https://raw.githubusercontent.com/csi-addons/volume-replication-operator/${replication_version}/config/crd/bases" + kubectl create -f "${replication_url}/replication.storage.openshift.io_volumereplications.yaml" + kubectl create -f "${replication_url}/replication.storage.openshift.io_volumereplicationclasses.yaml" + + #create the KEDA CRDS + keda_version=2.4.0 + keda_url="https://github.com/kedacore/keda/releases/download/v${keda_version}/keda-${keda_version}.yaml" + kubectl apply -f "${keda_url}" + # skipping folders and some yamls that are only for openshift. - kubectl create $(ls -I scc.yaml -I "*-openshift.yaml" -I "*.sh" -I "*.py" -p | grep -v / | awk ' { print " -f " $1 } ') --dry-run + manifests="$(find . 
-maxdepth 1 -type f -name '*.yaml' -and -not -name '*openshift*' -and -not -name 'scc.yaml')" + with_f_arg="$(echo "$manifests" | awk '{printf " -f %s",$1}')" # don't add newline + kubectl create ${with_f_arg} --dry-run=client } function create_cluster_prerequisites() { @@ -140,9 +189,14 @@ function create_cluster_prerequisites() { kubectl create -f crds.yaml -f common.yaml } +function deploy_manifest_with_local_build() { + sed -i "s|image: rook/ceph:v1.7.8|image: rook/ceph:local-build|g" $1 + kubectl create -f $1 +} + function deploy_cluster() { cd cluster/examples/kubernetes/ceph - kubectl create -f operator.yaml + deploy_manifest_with_local_build operator.yaml sed -i "s|#deviceFilter:|deviceFilter: ${BLOCK/\/dev\/}|g" cluster-test.yaml kubectl create -f cluster-test.yaml kubectl create -f object-test.yaml @@ -151,14 +205,35 @@ function deploy_cluster() { kubectl create -f rbdmirror.yaml kubectl create -f filesystem-mirror.yaml kubectl create -f nfs-test.yaml - kubectl create -f toolbox.yaml + deploy_manifest_with_local_build toolbox.yaml } function wait_for_prepare_pod() { - timeout 180 sh -c 'until kubectl -n rook-ceph logs -f job/$(kubectl -n rook-ceph get job -l app=rook-ceph-osd-prepare -o jsonpath='{.items[0].metadata.name}'); do sleep 5; done' || true - timeout 60 sh -c 'until kubectl -n rook-ceph logs $(kubectl -n rook-ceph get pod -l app=rook-ceph-osd,ceph_daemon_id=0 -o jsonpath='{.items[*].metadata.name}') --all-containers; do echo "waiting for osd container" && sleep 1; done' || true - kubectl -n rook-ceph describe job/$(kubectl -n rook-ceph get pod -l app=rook-ceph-osd-prepare -o jsonpath='{.items[*].metadata.name}') || true - kubectl -n rook-ceph describe deploy/rook-ceph-osd-0 || true + get_pod_cmd=(kubectl --namespace rook-ceph get pod --no-headers) + timeout=450 + start_time="${SECONDS}" + while [[ $(( SECONDS - start_time )) -lt $timeout ]]; do + pod="$("${get_pod_cmd[@]}" --selector=app=rook-ceph-osd-prepare --output custom-columns=NAME:.metadata.name,PHASE:status.phase | awk 'FNR <= 1')" + if echo "$pod" | grep 'Running\|Succeeded\|Failed'; then break; fi + echo 'waiting for at least one osd prepare pod to be running or finished' + sleep 5 + done + pod="$("${get_pod_cmd[@]}" --selector app=rook-ceph-osd-prepare --output name | awk 'FNR <= 1')" + kubectl --namespace rook-ceph logs --follow "$pod" + timeout=60 + start_time="${SECONDS}" + while [[ $(( SECONDS - start_time )) -lt $timeout ]]; do + pod="$("${get_pod_cmd[@]}" --selector app=rook-ceph-osd,ceph_daemon_id=0 --output custom-columns=NAME:.metadata.name,PHASE:status.phase)" + if echo "$pod" | grep 'Running'; then break; fi + echo 'waiting for OSD 0 pod to be running' + sleep 1 + done + # getting the below logs is a best-effort attempt, so use '|| true' to allow failures + pod="$("${get_pod_cmd[@]}" --selector app=rook-ceph-osd,ceph_daemon_id=0 --output name)" || true + kubectl --namespace rook-ceph logs "$pod" || true + job="$(kubectl --namespace rook-ceph get job --selector app=rook-ceph-osd-prepare --output name | awk 'FNR <= 1')" || true + kubectl -n rook-ceph describe "$job" || true + kubectl -n rook-ceph describe deployment/rook-ceph-osd-0 || true } function wait_for_ceph_to_be_ready() { @@ -249,7 +324,9 @@ selected_function="$1" if [ "$selected_function" = "generate_tls_config" ]; then $selected_function $2 $3 $4 $5 elif [ "$selected_function" = "wait_for_ceph_to_be_ready" ]; then - $selected_function $2 $3 + $selected_function $2 $3 +elif [ "$selected_function" = "deploy_manifest_with_local_build" ]; then 
+ $selected_function $2 else $selected_function fi diff --git a/tests/scripts/validate_cluster.sh b/tests/scripts/validate_cluster.sh index 20569b6dbbb5..80a2e1c527c4 100755 --- a/tests/scripts/validate_cluster.sh +++ b/tests/scripts/validate_cluster.sh @@ -39,6 +39,8 @@ function wait_for_daemon () { sleep 1 let timeout=timeout-1 done + echo "current status:" + $EXEC_COMMAND -s return 1 } @@ -88,31 +90,21 @@ function test_demo_pool { } function test_csi { - # shellcheck disable=SC2046 - timeout 90 sh -c 'until [ $(kubectl -n rook-ceph get pods --field-selector=status.phase=Running|grep -c ^csi-) -eq 4 ]; do sleep 1; done' - if [ $? -eq 0 ]; then - return 0 - fi - return 1 + timeout 360 bash <<-'EOF' + until [[ "$(kubectl -n rook-ceph get pods --field-selector=status.phase=Running|grep -c ^csi-)" -eq 4 ]]; do + echo "waiting for csi pods to be ready" + sleep 5 + done +EOF } -function display_status { - $EXEC_COMMAND -s > test/ceph-status.txt - $EXEC_COMMAND osd dump > test/ceph-osd-dump.txt - $EXEC_COMMAND report > test/ceph-report.txt - - kubectl -n rook-ceph logs deploy/rook-ceph-operator > test/operator-logs.txt - kubectl -n rook-ceph get pods -o wide > test/pods-list.txt - kubectl -n rook-ceph describe job/"$(kubectl -n rook-ceph get job -l app=rook-ceph-osd-prepare -o jsonpath='{.items[*].metadata.name}')" > test/osd-prepare-describe.txt - kubectl -n rook-ceph log job/"$(kubectl -n rook-ceph get job -l app=rook-ceph-osd-prepare -o jsonpath='{.items[*].metadata.name}')" > test/osd-prepare-logs.txt - kubectl -n rook-ceph describe deploy/rook-ceph-osd-0 > test/rook-ceph-osd-0-describe.txt - kubectl -n rook-ceph describe deploy/rook-ceph-osd-1 > test/rook-ceph-osd-1-describe.txt - kubectl -n rook-ceph logs deploy/rook-ceph-osd-0 --all-containers > test/rook-ceph-osd-0-logs.txt - kubectl -n rook-ceph logs deploy/rook-ceph-osd-1 --all-containers > test/rook-ceph-osd-1-logs.txt - kubectl get all -n rook-ceph -o wide > test/cluster-wide.txt - kubectl get all -n rook-ceph -o yaml > test/cluster-yaml.txt - kubectl -n rook-ceph get cephcluster -o yaml > test/cephcluster.txt - sudo lsblk | sudo tee -a test/lsblk.txt +function test_nfs { + timeout 360 bash <<-'EOF' + until [[ "$(kubectl -n rook-ceph get pods --field-selector=status.phase=Running|grep -c ^rook-ceph-nfs-)" -eq 1 ]]; do + echo "waiting for nfs pods to be ready" + sleep 5 + done +EOF } ######## @@ -123,7 +115,7 @@ test_demo_mon test_demo_mgr if [[ "$DAEMON_TO_VALIDATE" == "all" ]]; then - daemons_list="osd mds rgw rbd_mirror fs_mirror" + daemons_list="osd mds rgw rbd_mirror fs_mirror nfs" else # change commas to space comma_to_space=${DAEMON_TO_VALIDATE//,/ } @@ -158,6 +150,9 @@ for daemon in $daemons_list; do fs_mirror) test_demo_fs_mirror ;; + nfs) + test_nfs + ;; *) log "ERROR: unknown daemon to validate!" log "Available daemon are: mon mgr osd mds rgw rbd_mirror fs_mirror" @@ -171,7 +166,3 @@ $EXEC_COMMAND -s kubectl -n rook-ceph get pods kubectl -n rook-ceph logs "$(kubectl -n rook-ceph -l app=rook-ceph-operator get pods -o jsonpath='{.items[*].metadata.name}')" kubectl -n rook-ceph get cephcluster -o yaml - -set +eE -display_status -set -eE
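
Note: the snippet below is not part of the patch; it is a rough local sketch of how the new test helpers introduced in this change could be chained together outside of CI. It assumes a Rook checkout (with the Go and Docker build toolchain) as the working directory, a disposable Kubernetes cluster reachable through the current kubeconfig, passwordless sudo, and a spare block device exported in BLOCK; the /dev/sdb default and the /tmp log directory are placeholders rather than values taken from the patch.

#!/usr/bin/env bash
# Local sketch only: a throwaway cluster is assumed, and $BLOCK must point at a disk that may be wiped.
set -euo pipefail
export BLOCK=${BLOCK:-/dev/sdb}   # assumed spare disk; deploy_cluster strips the /dev/ prefix for deviceFilter

# Build the Rook image; build_rook tags the result as rook/ceph:local-build.
tests/scripts/github-action-helper.sh build_rook

# Create the CRDs and common resources, then deploy the operator, cluster, and toolbox.
# deploy_cluster rewrites operator.yaml and toolbox.yaml to the local-build tag via
# deploy_manifest_with_local_build before applying them.
tests/scripts/github-action-helper.sh create_cluster_prerequisites
tests/scripts/github-action-helper.sh deploy_cluster

# Follow the first osd-prepare pod and wait for OSD 0, as the reworked helper does.
tests/scripts/github-action-helper.sh wait_for_prepare_pod

# Gather cluster state and logs with the new collect-logs.sh helper (best effort).
LOG_DIR=/tmp/rook-test-logs tests/scripts/collect-logs.sh || true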