osd: remove osd with purge instead of destroy
The osd destroy command leaves the osd id in the osd tree (marked destroyed)
instead of fully purging it. If the osd id is not also removed, it gives the
impression that something from the old osd still needs to be cleaned up.

Signed-off-by: Travis Nielsen <tnielsen@redhat.com>
travisn committed Feb 25, 2022
1 parent ba165ec commit 879bde4
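For reference, a minimal sketch of the difference between the two Ceph commands, shown here against a hypothetical osd.1 (standard ceph CLI behavior, not part of this commit):

  # 'destroy' removes the OSD's cephx key and marks the id as destroyed,
  # but leaves the id in the osd tree so it can be reused by a replacement disk.
  ceph osd destroy osd.1 --yes-i-really-mean-it

  # 'purge' also removes the id from the osd map and the crush map,
  # leaving nothing behind from the old OSD.
  ceph osd purge osd.1 --force --yes-i-really-mean-it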
Showing 2 changed files with 8 additions and 7 deletions.
7 changes: 4 additions & 3 deletions .github/workflows/canary-integration-test.yml
@@ -75,7 +75,7 @@ jobs:
kubectl -n rook-ceph exec $toolbox -- ceph auth caps client.csi-rbd-provisioner mon 'profile rbd, allow command "osd ls"' osd 'profile rbd' mgr 'allow rw'
# print client.csi-rbd-provisioner user after update
kubectl -n rook-ceph exec $toolbox -- ceph auth get client.csi-rbd-provisioner
kubectl -n rook-ceph exec $toolbox -- python3 /etc/ceph/create-external-cluster-resources.py --rbd-data-pool-name replicapool
# print client.csi-rbd-provisioner user after running script
kubectl -n rook-ceph exec $toolbox -- ceph auth get client.csi-rbd-provisioner
@@ -124,7 +124,7 @@ jobs:
# --cluster-name and --run-as-user flag while upgrading
kubectl -n rook-ceph exec $toolbox -- python3 /etc/ceph/create-external-cluster-resources.py --upgrade --rbd-data-pool-name replicapool --rados-namespace radosNamespace --cluster-name rookStorage --run-as-user client.csi-rbd-node-rookStorage-replicapool-radosNamespace
# print upgraded client auth
kubectl -n rook-ceph exec $toolbox -- ceph auth get client.csi-rbd-node-rookStorage-replicapool-radosNamespace
- name: check-ownerreferences
run: tests/scripts/github-action-helper.sh check_ownerreferences
@@ -140,7 +140,8 @@ jobs:
kubectl -n rook-ceph create -f deploy/examples/osd-purge.yaml
toolbox=$(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[*].metadata.name}')
kubectl -n rook-ceph exec $toolbox -- ceph status
- timeout 120 sh -c "until kubectl -n rook-ceph exec $toolbox -- ceph osd tree|grep -qE 'osd.1.*.destroyed'; do echo 'waiting for ceph osd 1 to be destroyed'; sleep 1; done"
+ # wait until osd.1 is removed from the osd tree
+ timeout 120 sh -c "while kubectl -n rook-ceph exec $toolbox -- ceph osd tree|grep -qE 'osd.1'; do echo 'waiting for ceph osd 1 to be purged'; sleep 1; done"
kubectl -n rook-ceph exec $toolbox -- ceph status
kubectl -n rook-ceph exec $toolbox -- ceph osd tree
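As a hedged aside (not part of this commit), the same condition could be checked by hand from the toolbox pod; after a successful purge, osd.1 should be gone from both the osd tree and the auth database:

  toolbox=$(kubectl -n rook-ceph get pod -l app=rook-ceph-tools -o jsonpath='{.items[*].metadata.name}')
  # the grep finds nothing once osd.1 has been purged from the osd tree
  kubectl -n rook-ceph exec $toolbox -- ceph osd tree | grep 'osd.1' || echo "osd.1 gone from the osd tree"
  # purge also deletes the cephx key, so this lookup is expected to fail
  kubectl -n rook-ceph exec $toolbox -- ceph auth get osd.1 || echo "osd.1 auth entry removed"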
8 changes: 4 additions & 4 deletions pkg/daemon/ceph/osd/remove.go
@@ -174,19 +174,19 @@ func removeOSD(clusterdContext *clusterd.Context, clusterInfo *client.ClusterInf
}

// purge the osd
logger.Infof("destroying osd.%d", osdID)
purgeOSDArgs := []string{"osd", "destroy", fmt.Sprintf("osd.%d", osdID), "--yes-i-really-mean-it"}
logger.Infof("purging osd.%d", osdID)
purgeOSDArgs := []string{"osd", "purge", fmt.Sprintf("osd.%d", osdID), "--force", "--yes-i-really-mean-it"}
_, err = client.NewCephCommand(clusterdContext, clusterInfo, purgeOSDArgs).Run()
if err != nil {
logger.Errorf("failed to purge osd.%d. %v", osdID, err)
}

// Attempting to remove the parent host. Errors can be ignored if there are other OSDs on the same host
logger.Infof("removing osd.%d from ceph", osdID)
logger.Infof("attempting to remove host %q from crush map if not in use", osdID)
hostArgs := []string{"osd", "crush", "rm", hostName}
_, err = client.NewCephCommand(clusterdContext, clusterInfo, hostArgs).Run()
if err != nil {
logger.Errorf("failed to remove CRUSH host %q. %v", hostName, err)
logger.Infof("failed to remove CRUSH host %q. %v", hostName, err)
}

// call archiveCrash to silence crash warning in ceph health if any