root@u1:/etc/ceph# ceph osd tree
ID   CLASS  WEIGHT   TYPE NAME       STATUS  REWEIGHT  PRI-AFF
 -1         0.10497  root default
 -3         0.03499      host u2
  0    hdd  0.00999          osd.0   down           0  1.00000
  1    hdd  0.01199          osd.1   down           0  1.00000
  2    hdd  0.01399          osd.2   down           0  1.00000
 -7         0.03499      host u3
  4    hdd  0.00999          osd.4   up       1.00000  1.00000
  5    hdd  0.01199          osd.5   up       1.00000  1.00000
  7    hdd  0.01399          osd.7   up       1.00000  1.00000
-17         0.03499      host u5
  9    hdd  0.00999          osd.9   up       1.00000  1.00000
 10    hdd  0.01199          osd.10  up       1.00000  1.00000
 11    hdd  0.01399          osd.11  up       1.00000  1.00000
root@u1:/etc/ceph#
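(Not part of the original session: a quick way to confirm which OSDs are affected before touching anything is the health output and a state-filtered tree.)
ceph health detail     # lists the down/out OSDs and any degraded PGs
ceph osd tree down     # shows only the OSDs that are currently down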
root@u1:/etc/ceph# ceph orch daemon stop osd.0
Scheduled to stop osd.0 on host 'u2'
root@u1:/etc/ceph# ceph orch daemon stop osd.1
Scheduled to stop osd.1 on host 'u2'
root@u1:/etc/ceph# ceph orch daemon stop osd.2
Scheduled to stop osd.2 on host 'u2'
root@u1:/etc/ceph#
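(Optional check, not in the original session: confirm the daemons really stopped before removing them; the same 'ceph orch ps' filter used later in this walkthrough works here too.)
ceph orch ps | grep osd    # the u2 daemons should now show a stopped state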
root@u1:/etc/ceph# ceph orch daemon rm osd.0 --force
Removed osd.0 from host 'u2'
root@u1:/etc/ceph# ceph orch daemon rm osd.1 --force
Removed osd.1 from host 'u2'
root@u1:/etc/ceph# ceph orch daemon rm osd.2 --force
Removed osd.2 from host 'u2'
root@u1:/etc/ceph#
root@u1:/etc/ceph#
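(For reference: cephadm can also drive the whole removal through the orchestrator instead of the per-daemon commands above; the zap error later in this session points at the same command. A sketch, using the OSD IDs from this cluster:)
ceph orch osd rm 0 1 2      # schedule drain and removal of the OSDs via cephadm
ceph orch osd rm status     # watch the removal queue until it is empty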
root@u1:/etc/ceph# ceph osd out 0
osd.0 is already out.
root@u1:/etc/ceph# ceph osd out 1
osd.1 is already out.
root@u1:/etc/ceph# ceph osd out 2
osd.2 is already out.
root@u1:/etc/ceph#
root@u1:/etc/ceph#
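(Marking an OSD out normally triggers data migration; in the general case it is worth confirming recovery has finished before destroying anything. Two checks, not part of the original session:)
ceph -s                          # overall health and recovery progress
ceph osd safe-to-destroy 0 1 2   # reports whether these OSDs can be destroyed without data loss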
root@u1:/etc/ceph# ceph osd crush remove osd.0
removed item id 0 name 'osd.0' from crush map
root@u1:/etc/ceph# ceph osd crush remove osd.1
removed item id 1 name 'osd.1' from crush map
root@u1:/etc/ceph# ceph osd crush remove osd.2
removed item id 2 name 'osd.2' from crush map
root@u1:/etc/ceph#
root@u1:/etc/ceph#
root@u1:/etc/ceph# ceph osd purge osd.0 --yes-i-really-mean-it
purged osd.0
root@u1:/etc/ceph# ceph osd purge osd.1 --yes-i-really-mean-it
purged osd.1
root@u1:/etc/ceph# ceph osd purge osd.2 --yes-i-really-mean-it
purged osd.2
root@u1:/etc/ceph#
root@u1:/etc/ceph#
root@u1:/etc/ceph#
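(Note: 'ceph osd purge' already combines the CRUSH removal, 'ceph auth del' and 'ceph osd rm', so the explicit 'ceph osd out' and 'ceph osd crush remove' steps above are belt and braces rather than strictly required. Quick checks that nothing is left behind:)
ceph osd ls           # the purged IDs should no longer be listed
ceph auth get osd.0   # should now return ENOENT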
root@u1:/etc/ceph# ceph osd tree
ID   CLASS  WEIGHT   TYPE NAME       STATUS  REWEIGHT  PRI-AFF
 -1         0.06998  root default
 -3               0      host u2
 -7         0.03499      host u3
  4    hdd  0.00999          osd.4   up       1.00000  1.00000
  5    hdd  0.01199          osd.5   up       1.00000  1.00000
  7    hdd  0.01399          osd.7   up       1.00000  1.00000
-17         0.03499      host u5
  9    hdd  0.00999          osd.9   up       1.00000  1.00000
 10    hdd  0.01199          osd.10  up       1.00000  1.00000
 11    hdd  0.01399          osd.11  up       1.00000  1.00000
root@u1:/etc/ceph#
root@u2:/# ceph osd metadata 0 | grep devices
Error ENOENT: osd.0 does not exist
root@u2:/# ceph osd metadata 1 | grep devices
Error ENOENT: osd.1 does not exist
root@u2:/# ceph osd metadata 2 | grep devices
Error ENOENT: osd.2 does not exist
root@u2:/#
root@u2:/#
root@u2:/#
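(The lookups above fail because the OSDs have already been purged; 'ceph osd metadata' has to be consulted before removal if you want it to reveal the backing devices. With the OSDs gone, the device names can still be confirmed from the orchestrator inventory or directly on the host:)
ceph orch device ls u2    # cephadm's view of u2's disks and whether they are available
lsblk                     # run on u2: block devices and any leftover ceph LVs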
root@u2:/# wipefs -af /dev/sdb
/dev/sdb: 8 bytes were erased at offset 0x00000218 (LVM2_member): 4c 56 4d 32 20 30 30 31
root@u2:/# wipefs -af /dev/sdc
/dev/sdc: 8 bytes were erased at offset 0x00000218 (LVM2_member): 4c 56 4d 32 20 30 30 31
root@u2:/# wipefs -af /dev/sdd
/dev/sdd: 8 bytes were erased at offset 0x00000218 (LVM2_member): 4c 56 4d 32 20 30 30 31
root@u2:/#
root@u2:/#
root@u1:/etc/ceph#
root@u1:/etc/ceph# ceph orch device zap u2 /dev/sdb --force
root@u1:/etc/ceph#
root@u1:/etc/ceph# ceph orch device zap u2 /dev/sdc --force
root@u1:/etc/ceph#
root@u1:/etc/ceph# ceph orch device zap u2 /dev/sdd --force
The zap reported an error here.
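(A common cause of this kind of zap failure is leftover LVM/device-mapper state: wipefs removes the LVM2_member signature, but the ceph volume group created by ceph-volume can still be active and keep the device busy. A reboot clears it, as below, but it can usually also be released in place; a rough sketch to run on u2, with the VG name as a placeholder:)
vgs | grep ceph              # list any leftover ceph-* volume groups
vgremove -f <ceph_vg_name>   # remove the stale VG (placeholder name)
cephadm ceph-volume -- lvm zap --destroy /dev/sdd   # or let ceph-volume clean the device itself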
Restart the OSD host and wipe the disks again:
root@u2:~# reboot
root@u2:~# wipefs -af /dev/sdb
root@u2:~# wipefs -af /dev/sdc
root@u2:~# wipefs -af /dev/sdd
root@u2:~#
root@u1:/etc/ceph#
root@u1:/etc/ceph# ceph orch device zap u2 /dev/sdb --force
zap successful for /dev/sdb on u2
root@u1:/etc/ceph# ceph orch device zap u2 /dev/sdc --force
Error EINVAL: Unable to zap: device '/dev/sdc' on u2 has 1 active OSD (osd.1). Use 'ceph orch osd rm' first.
root@u1:/etc/ceph# ceph orch device zap u2 /dev/sdd --force
Error EINVAL: Unable to zap: device '/dev/sdd' on u2 has 1 active OSD (osd.2). Use 'ceph orch osd rm' first.
root@u1:/etc/ceph#
root@u1:/etc/ceph# ceph osd tree
ID   CLASS  WEIGHT   TYPE NAME       STATUS  REWEIGHT  PRI-AFF
 -1         0.10515  root default
 -3         0.03517      host u2
  0    hdd  0.00980          osd.0   up       1.00000  1.00000
  1    hdd  0.01169          osd.1   up       1.00000  1.00000
  2    hdd  0.01369          osd.2   up       1.00000  1.00000
 -7         0.03499      host u3
  4    hdd  0.00999          osd.4   up       1.00000  1.00000
  5    hdd  0.01199          osd.5   up       1.00000  1.00000
  7    hdd  0.01399          osd.7   up       1.00000  1.00000
-17         0.03499      host u5
  9    hdd  0.00999          osd.9   up       1.00000  1.00000
 10    hdd  0.01199          osd.10  up       1.00000  1.00000
 11    hdd  0.01399          osd.11  up       1.00000  1.00000
root@u1:/etc/ceph#
root@u1:/etc/ceph# ceph orch ls
NAME                       PORTS        RUNNING  REFRESHED  AGE  PLACEMENT
alertmanager               ?:9093,9094  1/1      3m ago     2w   count:1
crash                                   4/4      4m ago     2w   *
grafana                    ?:3000       1/1      3m ago     2w   count:1
iscsi.gw                                3/3      4m ago     10d  u1;u2;u3
mgr                                     2/2      3m ago     2w   count:2
mon                                     3/3      4m ago     10d  count:3
node-exporter              ?:9100       4/4      4m ago     2w   *
osd.all-available-devices               9        4m ago     10d  *
prometheus                 ?:9095       1/1      3m ago     2w   count:1
rgw.rgw                    ?:80         3/3      4m ago     9d   u1;u2;u3
root@u1:/etc/ceph#
root@u1:/etc/ceph# ceph orch ps|grep osd
osd.0   u2  running (2m)  46s ago  2m  105M  4096M  16.2.9  3520ead5eb19  f30a2a503231
osd.1   u2  running (2m)  46s ago  2m  113M  4096M  16.2.9  3520ead5eb19  20b40bfd80b2
osd.10  u5  running (2h)  4m ago   2w  237M  4096M  16.2.9  3520ead5eb19  1b26ddf645b8
osd.11  u5  running (2h)  4m ago   2w  236M  4096M  16.2.9  3520ead5eb19  cb7468aa3ea6
osd.2   u2  running (2m)  46s ago  2m  107M  4096M  16.2.9  3520ead5eb19  9d2f825db46e
osd.4   u3  running (2h)  4m ago   2w  166M  4096M  16.2.9  3520ead5eb19  f14fb6273f93
osd.5   u3  running (2h)  4m ago   2w  165M  4096M  16.2.9  3520ead5eb19  c0c0ad2a4fc8
osd.7   u3  running (2h)  4m ago   2w  228M  4096M  16.2.9  3520ead5eb19  5d76a9f92f6e
osd.9   u5  running (2h)  4m ago   2w  186M  4096M  16.2.9  3520ead5eb19  dd1dba2e974f
root@u1:/etc/ceph# ceph orch ps|grep osd|wc -l
9
root@u1:/etc/ceph#
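Why did osd.0, osd.1 and osd.2 come straight back? The 'osd.all-available-devices' service shown in 'ceph orch ls' has placement '*' and automatically creates an OSD on any device that looks clean, so as soon as the disks on u2 were wiped and zapped, cephadm redeployed OSDs onto them. That is the desired outcome in this walkthrough, and it is also why the later zap attempts failed with 'has 1 active OSD'. If the goal were instead to keep the disks empty, the spec would need to be paused first; the documented way is:
ceph orch apply osd --all-available-devices --unmanaged=true   # stop cephadm from auto-creating OSDs on free devices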