#!/usr/bin/env bash
# Wipe a disk previously used as a Rook/Ceph OSD so it can be reused.
# Inspired from https://rook.io/docs/rook/v1.7/ceph-teardown.html
#
# Usage: ceph-delete-disk /dev/sdx
#
# WARNING: destroys all data on the given disk.
set -euo pipefail

if [[ $# -ne 1 ]]; then
  printf 'Usage: %s <disk>\n' "$0" >&2
  printf 'e.g.   %s /dev/sdx\n' "$0" >&2
  exit 1
fi

DISK="$1"

# Zap the disk to a fresh, usable state (zap-all is important, b/c MBR has
# to be clean). You will have to run this step for all disks.
sgdisk --zap-all "$DISK"

# Clean hdds with dd: overwrite the first 100 MiB to destroy leftover
# Ceph metadata (bluestore labels, LVM headers).
dd if=/dev/zero of="$DISK" bs=1M count=100 oflag=direct,dsync

# Clean disks such as ssd with blkdiscard instead of dd. blkdiscard fails
# on devices without discard support (e.g. spinning disks) — that is
# expected, so don't let it abort the script under 'set -e'.
blkdiscard "$DISK" || printf 'blkdiscard not supported on %s, skipping\n' "$DISK" >&2

# These steps only have to be run once on each node.
# If rook set up osds using ceph-volume, teardown leaves device-mapper
# entries mapped that lock the disks; remove them. The existence check
# handles the case where the glob matches nothing.
for mapping in /dev/mapper/ceph-*; do
  [[ -e "$mapping" ]] || continue
  dmsetup remove "$mapping"
done

# ceph-volume setup can leave ceph- directories in /dev and /dev/mapper
# (unnecessary clutter).
rm -rf /dev/ceph-* /dev/mapper/ceph--*

# Inform the OS of partition table changes.
partprobe "$DISK"