
An OSD is down and cannot be restarted

The previous section covered restarting an OSD to recover from cluster faults, but sometimes an OSD goes down and simply cannot be restarted.

# The down OSD has left PGs degraded; checking the OSD tree shows that osd.1 on node-2 is down
[root@node-2 ~]# ceph health detail
HEALTH_WARN Degraded data redundancy: 53852/161556 objects degraded (33.333%), 209 pgs degraded, 464 pgs undersized
PG_DEGRADED Degraded data redundancy: 53852/161556 objects degraded (33.333%), 209 pgs degraded, 464 pgs undersized
...
[root@node-2 ~]# ceph osd tree
ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF
-1 0.03918 root default
-3 0.01959 host node-1
0 hdd 0.00980 osd.0 up 1.00000 1.00000
3 hdd 0.00980 osd.3 up 0.09999 1.00000
-5 0.00980 host node-2
1 hdd 0.00980 osd.1 down 0 1.00000
-7 0.00980 host node-3
2 hdd 0.00980 osd.2 up 1.00000 1.00000

# Attempts to restart the OSD fail
[root@node-2 ~]# systemctl restart ceph-osd@1
Job for ceph-osd@1.service failed because start of the service was attempted too often. See "systemctl status ceph-osd@1.service" and "journalctl -xe" for details.
To force a start use "systemctl reset-failed ceph-osd@1.service" followed by "systemctl start ceph-osd@1.service" again.
[root@node-2 ~]# systemctl stop ceph-osd@1
[root@node-2 ~]# systemctl restart ceph-osd@1
Job for ceph-osd@1.service failed because start of the service was attempted too often. See "systemctl status ceph-osd@1.service" and "journalctl -xe" for details.
To force a start use "systemctl reset-failed ceph-osd@1.service" followed by "systemctl start ceph-osd@1.service" again.
[root@node-2 ~]# systemctl stop ceph-osd@1
[root@node-2 ~]# systemctl restart ceph-osd@1
Job for ceph-osd@1.service failed because start of the service was attempted too often. See "systemctl status ceph-osd@1.service" and "journalctl -xe" for details.
To force a start use "systemctl reset-failed ceph-osd@1.service" followed by "systemctl start ceph-osd@1.service" again
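
The unit has hit systemd's start-rate limit. Before rebuilding anything, it is usually worth clearing the failed state and reading the OSD's own log to see why the daemon keeps dying; a minimal sketch using standard systemd/journalctl commands (not part of the original session):

# Clear systemd's start-rate limiter, inspect why the daemon keeps exiting, then try one more start
systemctl reset-failed ceph-osd@1
journalctl -u ceph-osd@1 -n 100 --no-pager
systemctl start ceph-osd@1

If the daemon still exits immediately (for example because the disk or the OSD's metadata is damaged), continue with the options below.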

 
When you hit this problem, there are three options:

1. If the down OSD does not block cluster writes (the affected PGs are only degraded), wait a while and then try restarting the OSD again.
2. If the server runs no other important services, try rebooting the whole server.
3. If neither of the above works, manually remove the OSD and then recreate it (a safety check is sketched below).
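
Before destroying the OSD, it is usually worth confirming that the remaining replicas make it safe to lose it for good; a quick check, assuming a Luminous or newer release:

# Confirm the cluster can tolerate permanently removing osd.1
ceph osd safe-to-destroy osd.1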
The following walks through manually removing osd.1 and then recreating it:

# In this example we remove osd.1 on node-2

# 1. Remove the OSD from the OSD map
[root@node-1 ~]# ceph osd rm osd.1
removed osd.1
[root@node-1 ~]# ceph osd tree
ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF
-1 0.03918 root default
-3 0.01959 host node-1
0 hdd 0.00980 osd.0 up 1.00000 1.00000
3 hdd 0.00980 osd.3 up 0.09999 1.00000
-5 0.00980 host node-2
1 hdd 0.00980 osd.1 DNE 0
-7 0.00980 host node-3
2 hdd 0.00980 osd.2 up 1.00000 1.00000

# 2. Remove the OSD from the CRUSH map
[root@node-1 ~]# ceph osd crush rm osd.1
removed item id 1 name 'osd.1' from crush map
[root@node-1 ~]# ceph osd tree
ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF
-1 0.02939 root default
-3 0.01959 host node-1
0 hdd 0.00980 osd.0 up 1.00000 1.00000
3 hdd 0.00980 osd.3 up 0.09999 1.00000
-5 0 host node-2
-7 0.00980 host node-3
2 hdd 0.00980 osd.2 up 1.00000 1.00000

# 3. Delete the OSD's cephx authentication entry
[root@node-1 ~]# ceph auth del osd.1
updated
[root@node-1 ~]# ceph auth ls | grep osd.1
installed auth entries:
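
As an aside, on Luminous and later releases these first three steps can usually be collapsed into a single command; a sketch (it removes the OSD from the OSD map, the CRUSH map, and auth in one go):

# Shortcut equivalent to steps 1-3 on recent releases
ceph osd purge 1 --yes-i-really-mean-it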

# 4. Check cluster status. The cluster uses 3 replicas, so with one OSD gone all PGs remain active and client reads/writes are unaffected
[root@node-1 ~]# ceph -s
cluster:
id: 60e065f1-d992-4d1a-8f4e-f74419674f7e
health: HEALTH_WARN
Degraded data redundancy: 53157/161724 objects degraded (32.869%), 207 pgs degraded, 461 pgs undersized

services:
mon: 3 daemons, quorum node-1,node-2,node-3 (age 21m)
mgr: node-1(active, since 57m)
mds: cephfs:2 {0=mds1=up:active,1=mds2=up:active} 1 up:standby
osd: 3 osds: 3 up (since 2m), 3 in (since 24m); 4 remapped pgs
rgw: 1 daemon active (node-2)

task status:

data:
pools: 9 pools, 464 pgs
objects: 53.91k objects, 802 MiB
usage: 11 GiB used, 19 GiB / 30 GiB avail
pgs: 53157/161724 objects degraded (32.869%)
816/161724 objects misplaced (0.505%)
254 active+undersized
206 active+undersized+degraded
3 active+clean+remapped
1 active+undersized+degraded+remapped+backfilling

io:
recovery: 35 KiB/s, 7 objects/s


# 5. On node-2, unmount /var/lib/ceph/osd/ceph-1/. This directory holds osd.1's metadata, including a block symlink pointing at the underlying disk
[root@node-1 ~]# ssh node-2
Last login: Mon Oct 18 09:48:25 2021 from node-1
[root@node-2 ~]# umount /var/lib/ceph/osd/ceph-1/
[root@node-2 ~]# rm -rf /var/lib/ceph/osd/ceph-1/

# 6. On node-2, osd.1 is backed by /dev/sdb; this is the disk we will rebuild on
[root@node-2 ~]# ceph-volume lvm list


====== osd.1 =======

[block] /dev/ceph-847ed937-dfbb-485e-af90-9cf27bf08c99/osd-block-66119fd9-226d-4665-b2cc-2b6564b7d715

block device /dev/ceph-847ed937-dfbb-485e-af90-9cf27bf08c99/osd-block-66119fd9-226d-4665-b2cc-2b6564b7d715
block uuid 9owOrT-EMVD-c2kY-53Xj-2ECv-0Kji-euIRkX
cephx lockbox secret
cluster fsid 60e065f1-d992-4d1a-8f4e-f74419674f7e
cluster name ceph
crush device class None
encrypted 0
osd fsid 66119fd9-226d-4665-b2cc-2b6564b7d715
osd id 1
osdspec affinity
type block
vdo 0
devices /dev/sdb


# 7. Wipe the disk: first run dmsetup remove {dm device name}, then reformat
[root@node-2 ~]# dmsetup ls
ceph--847ed937--dfbb--485e--af90--9cf27bf08c99-osd--block--66119fd9--226d--4665--b2cc--2b6564b7d715 (253:2)
centos-swap (253:1)
centos-root (253:0)
[root@node-2 ~]# dmsetup remove ceph--847ed937--dfbb--485e--af90--9cf27bf08c99-osd--block--66119fd9--226d--4665--b2cc--2b6564b7d715
[root@node-2 ~]# mkfs.xfs -f /dev/sdb
meta-data=/dev/sdb isize=512 agcount=4, agsize=655360 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0, sparse=0
data = bsize=4096 blocks=2621440, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
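
As an alternative to the dmsetup + mkfs.xfs combination in this step, ceph-volume can tear down the old LVM volumes and wipe the device itself; a sketch, assuming the old OSD's LV/VG are still present on /dev/sdb:

# Destroy the leftover LVs/VG and wipe the device in one step
ceph-volume lvm zap /dev/sdb --destroy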

# 8. Recreate the OSD
[root@node-2 ~]# ceph-volume lvm create --data /dev/sdb
Running command: /usr/bin/ceph-authtool --gen-print-key
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 1e8fb7ca-a870-414b-930d-1e22f32eb84b
Running command: /usr/sbin/vgcreate --force --yes ceph-2c45e0ec-5edd-4df8-b6b0-af18ef077254 /dev/sdb
stdout: Wiping xfs signature on /dev/sdb.
stdout: Physical volume "/dev/sdb" successfully created.
stdout: Volume group "ceph-2c45e0ec-5edd-4df8-b6b0-af18ef077254" successfully created
Running command: /usr/sbin/lvcreate --yes -l 2559 -n osd-block-1e8fb7ca-a870-414b-930d-1e22f32eb84b ceph-2c45e0ec-5edd-4df8-b6b0-af18ef077254
stdout: Logical volume "osd-block-1e8fb7ca-a870-414b-930d-1e22f32eb84b" created.
Running command: /usr/bin/ceph-authtool --gen-print-key
Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-1
Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-2c45e0ec-5edd-4df8-b6b0-af18ef077254/osd-block-1e8fb7ca-a870-414b-930d-1e22f32eb84b
Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
Running command: /usr/bin/ln -s /dev/ceph-2c45e0ec-5edd-4df8-b6b0-af18ef077254/osd-block-1e8fb7ca-a870-414b-930d-1e22f32eb84b /var/lib/ceph/osd/ceph-1/block
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-1/activate.monmap
stderr: 2021-10-18 13:53:29.353 7f369a1e2700 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory
2021-10-18 13:53:29.353 7f369a1e2700 -1 AuthRegistry(0x7f36940662f8) no keyring found at /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,, disabling cephx
stderr: got monmap epoch 3
Running command: /usr/bin/ceph-authtool /var/lib/ceph/osd/ceph-1/keyring --create-keyring --name osd.1 --add-key AQDYC21hGa9KKRAAZoHogGn9ouEPpb9RY3/FXw==
stdout: creating /var/lib/ceph/osd/ceph-1/keyring
added entity osd.1 auth(key=AQDYC21hGa9KKRAAZoHogGn9ouEPpb9RY3/FXw==)
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1/keyring
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1/
Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 1 --monmap /var/lib/ceph/osd/ceph-1/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-1/ --osd-uuid 1e8fb7ca-a870-414b-930d-1e22f32eb84b --setuser ceph --setgroup ceph
stderr: 2021-10-18 13:53:29.899 7fa4e1df1a80 -1 bluestore(/var/lib/ceph/osd/ceph-1/) _read_fsid unparsable uuid
--> ceph-volume lvm prepare successful for: /dev/sdb
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1
Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-2c45e0ec-5edd-4df8-b6b0-af18ef077254/osd-block-1e8fb7ca-a870-414b-930d-1e22f32eb84b --path /var/lib/ceph/osd/ceph-1 --no-mon-config
Running command: /usr/bin/ln -snf /dev/ceph-2c45e0ec-5edd-4df8-b6b0-af18ef077254/osd-block-1e8fb7ca-a870-414b-930d-1e22f32eb84b /var/lib/ceph/osd/ceph-1/block
Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-1/block
Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1
Running command: /usr/bin/systemctl enable ceph-volume@lvm-1-1e8fb7ca-a870-414b-930d-1e22f32eb84b
stderr: Created symlink from /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-1-1e8fb7ca-a870-414b-930d-1e22f32eb84b.service to /usr/lib/systemd/system/ceph-volume@.service.
Running command: /usr/bin/systemctl enable --runtime ceph-osd@1
Running command: /usr/bin/systemctl start ceph-osd@1
--> ceph-volume lvm activate successful for osd ID: 1
--> ceph-volume lvm create successful for: /dev/sdb

# 9. Check the cluster and wait for the data to migrate back and recover
[root@node-2 ~]# ceph -s
cluster:
id: 60e065f1-d992-4d1a-8f4e-f74419674f7e
health: HEALTH_WARN
Degraded data redundancy: 53237/161556 objects degraded (32.953%), 213 pgs degraded

services:
mon: 3 daemons, quorum node-1,node-2,node-3 (age 4h)
mgr: node-1(active, since 5h)
mds: cephfs:2 {0=mds1=up:active,1=mds2=up:active} 1 up:standby
osd: 4 osds: 4 up (since 38s), 4 in (since 4h); 111 remapped pgs
rgw: 1 daemon active (node-2)

task status:

data:
pools: 9 pools, 464 pgs
objects: 53.85k objects, 802 MiB
usage: 12 GiB used, 28 GiB / 40 GiB avail
pgs: 53237/161556 objects degraded (32.953%)
814/161556 objects misplaced (0.504%)
241 active+clean
103 active+recovery_wait+degraded
63 active+undersized+degraded+remapped+backfill_wait
46 active+recovery_wait+undersized+degraded+remapped
9 active+recovery_wait
1 active+recovering+undersized+degraded+remapped
1 active+remapped+backfill_wait

io:
recovery: 0 B/s, 43 keys/s, 2 objects/s
[root@node-2 ~]# ceph osd df
ID CLASS WEIGHT REWEIGHT SIZE RAW USE DATA OMAP META AVAIL %USE VAR PGS STATUS
0 hdd 0.00980 1.00000 10 GiB 4.8 GiB 3.8 GiB 20 MiB 1004 MiB 5.2 GiB 47.59 1.57 440 up
3 hdd 0.00980 0.09999 10 GiB 1.3 GiB 265 MiB 1.5 MiB 1022 MiB 8.7 GiB 12.59 0.42 25 up
1 hdd 0.00980 1.00000 10 GiB 1.2 GiB 212 MiB 0 B 1 GiB 8.8 GiB 12.08 0.40 361 up
2 hdd 0.00980 1.00000 10 GiB 4.9 GiB 3.9 GiB 22 MiB 1002 MiB 5.1 GiB 48.85 1.61 464 up
TOTAL 40 GiB 12 GiB 8.1 GiB 44 MiB 4.0 GiB 28 GiB 30.28
MIN/MAX VAR: 0.40/1.61 STDDEV: 18.02

 
One thing to watch when rebuilding an OSD: if your cluster uses a customized CRUSH map, check the CRUSH map afterwards as well, since the recreated OSD may not land back in the bucket you intended (a quick check is sketched below).
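
A minimal way to verify the placement and, if needed, correct it; the weight and bucket names here are this example cluster's values and would differ on yours:

# Verify where the rebuilt OSD landed
ceph osd tree

# If it ended up in the wrong bucket, move it back (weight/host from this example)
ceph osd crush create-or-move osd.1 0.00980 host=node-2 root=default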

While the OSD recovers, client I/O to the cluster may be affected. The following tunables can be adjusted to control this.

References:
https://www.cnblogs.com/gzxbkk/p/7704464.html
http://strugglesquirrel.com/2019/02/02/ceph%E8%BF%90%E7%BB%B4%E5%A4%A7%E5%AE%9D%E5%89%91%E4%B9%8B%E9%9B%86%E7%BE%A4osd%E4%B8%BAfull/

To avoid OSDs being killed by the extra load once PG migration starts, first add the following settings to the [global] section of the config file:

osd_op_thread_suicide_timeout = 900
osd_op_thread_timeout = 900
osd_recovery_thread_suicide_timeout = 900
osd_heartbeat_grace = 900
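
If editing ceph.conf and restarting the OSDs is inconvenient, the same options can usually also be injected at runtime; a sketch (whether a given option takes effect without a restart depends on your Ceph release):

ceph tell osd.* injectargs '--osd_heartbeat_grace 900 --osd_op_thread_timeout 900'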
 
Recovery-speed tunables: the defaults are fairly conservative. If you want migration to go faster, try tuning the following parameters:

osd_recovery_max_single_start # larger = faster recovery but more impact on client I/O; default 1
osd_recovery_max_active # larger = faster recovery but more impact on client I/O; default 3
osd_recovery_op_priority # larger = faster recovery but more impact on client I/O; default 3
osd_max_backfills # larger = faster recovery but more impact on client I/O; default 1
osd_recovery_sleep # smaller = faster recovery but more impact on client I/O; default 0 seconds

[root@node-2 ~]# ceph config set osd osd_recovery_max_single_start 32
[root@node-2 ~]# ceph config set osd osd_max_backfills 32
[root@node-2 ~]# ceph config set osd osd_recovery_max_active 32
After these settings, recovery speeds up dramatically. Note that the three settings must be raised together; raising only one leaves the others as the bottleneck and recovery will not get any faster.

# Check recovery speed with ceph -s; before the settings above, it was only about 6 objects/s
[root@node-2 ~]# ceph -s
io:
recovery: 133 KiB/s, 35 objects/s
 
For reference, the commands for working with these settings:

# List every available configuration option
ceph config ls

# Show an option's default value and description
[root@node-2 ~]# ceph config help osd_recovery_sleep
osd_recovery_sleep - Time in seconds to sleep before next recovery or backfill op
(float, advanced)
Default: 0.000000
Can update at runtime: true

# Show the settings that have been customized
[root@node-2 ~]# ceph config dump
WHO MASK LEVEL OPTION VALUE RO
mon advanced mon_warn_on_insecure_global_id_reclaim false
mon advanced mon_warn_on_insecure_global_id_reclaim_allowed false
mgr advanced mgr/balancer/active false
mgr advanced mgr/balancer/mode upmap
mgr advanced mgr/balancer/sleep_interval 60
mds advanced mds_session_blacklist_on_evict true
mds advanced mds_session_blacklist_on_timeout true
client advanced client_reconnect_stale true
client advanced debug_client 20/20

# Change a setting
ceph config set {mon/osd/client/mgr/..} [config_name] [value]
ceph config set client client_reconnect_stale true

# Query the customized settings of a specific entity (the target has to be written as osd.osd, mon.mon, mgr.mgr, client.client, and so on)
$ ceph config get client.client
WHO MASK LEVEL OPTION VALUE RO
client advanced client_reconnect_stale true
client advanced debug_client 20/20

# Remove a customized setting
$ ceph config rm [who] [name]
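
For example, once recovery has finished, the overrides set earlier can be removed so the OSDs fall back to their defaults (a usage sketch of ceph config rm):

ceph config rm osd osd_recovery_max_single_start
ceph config rm osd osd_max_backfills
ceph config rm osd osd_recovery_max_active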





Original post: https://blog.csdn.net/DeamonXiao/article/details/120879236
