
Ceph 17.2.5 single-node install: 100.000% pgs not active

Original post by jieguo, 2023-01-05

Test environment: four SSDs, two 224 GB and two 112 GB, with one of the 224 GB drives used as the system disk.
After an offline install of Ceph 17.2.5, one SSD turned out to have been used as an OSD before and had to be wiped. The cluster also warned: Reduced data availability: 1 pg inactive
100.000% pgs not active

References:
Wiping an old OSD:
https://blog.csdn.net/jycjyc/article/details/127424957
Handling 100.000% pgs not active:
https://www.cnblogs.com/boshen-hzb/p/13305560.html
Reduced data availability: 1 pg inactive, 1 pg stale (not applicable here):
https://www.antute.com.cn/index.php?id=258
In outline, the fix was: zap the stale LVM volume on /dev/sdc, let the orchestrator create the three OSDs, and change the CRUSH failure domain from host to osd so a single node can satisfy the replicated rule. A condensed sketch is shown below, followed by the full session.
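The commands below are a condensed sketch reconstructed from the session; the OSD-creation step is the usual cephadm command rather than one shown verbatim in the log, and the file names are illustrative.

# wipe the SSD that still carries an old Ceph LVM volume (PERMANENTLY erases its data)
ceph orch device zap omnisky /dev/sdc --force

# have cephadm turn every free disk into an OSD (assumed; not shown explicitly in the session)
ceph orch apply osd --all-available-devices

# change the CRUSH failure domain from host to osd so one node can hold all replicas
ceph osd getcrushmap -o crushmap
crushtool -d crushmap -o crushmap.txt
sed -i 's/step chooseleaf firstn 0 type host/step chooseleaf firstn 0 type osd/' crushmap.txt
crushtool -c crushmap.txt -o crushmap-new
ceph osd setcrushmap -i crushmap-new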

root@omnisky:/etc/ceph# cat /etc/issue
Ubuntu 20.04.5 LTS \n \l
root@omnisky:~# ceph -s
  cluster:
    id:     7286e65a-8bfa-11ed-974b-dba0548c492a
    health: HEALTH_WARN
            OSD count 0 < osd_pool_default_size 3
 
  services:
    mon: 1 daemons, quorum omnisky (age 88s)
    mgr: omnisky.bvogdw(active, since 80s)
    osd: 0 osds: 0 up, 0 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:     
 
root@omnisky:~# free -g
              total        used        free      shared  buff/cache   available
Mem:            125           1         123           0           0         122
Swap:             7           0           7
root@omnisky:~# lsblk
NAME                                                                                                  MAJ:MIN RM   SIZE RO TYPE MOUNTPOINT
loop0                                                                                                   7:0    0    62M  1 loop /snap/core20/1611
loop1                                                                                                   7:1    0  67.8M  1 loop /snap/lxd/22753
loop2                                                                                                   7:2    0    47M  1 loop /snap/snapd/16292
sda                                                                                                     8:0    0 223.6G  0 disk 
├─sda1                                                                                                  8:1    0   1.1G  0 part /boot/efi
├─sda2                                                                                                  8:2    0     2G  0 part /boot
└─sda3                                                                                                  8:3    0 220.5G  0 part 
  └─ubuntu--vg-ubuntu--lv                                                                             253:1    0 220.5G  0 lvm  /
sdb                                                                                                     8:16   0 223.6G  0 disk 
sdc                                                                                                     8:32   0 111.8G  0 disk 
└─ceph--7fd2e0a4--3e07--4b99--af47--a62d0aebd039-osd--block--67ac017a--0ce6--4138--8ab7--d319879076ed 253:0    0 111.8G  0 lvm  
sdd                                                                                                     8:48   0 111.8G  0 disk 
root@omnisky:~# ceph orch ls
NAME           PORTS        RUNNING  REFRESHED  AGE  PLACEMENT  
alertmanager   ?:9093,9094      1/1  3m ago     40m  count:1    
crash                           1/1  3m ago     40m  *          
grafana        ?:3000           1/1  3m ago     40m  count:1    
mgr                             1/2  3m ago     40m  count:2    
mon                             1/5  3m ago     40m  count:5    
node-exporter  ?:9100           1/1  3m ago     40m  *          
prometheus     ?:9095           1/1  3m ago     40m  count:1    
root@omnisky:~# ceph orch ps
NAME                   HOST     PORTS        STATUS        REFRESHED  AGE  MEM USE  MEM LIM  VERSION  IMAGE ID      CONTAINER ID  
alertmanager.omnisky   omnisky  *:9093,9094  running (4m)     4m ago  40m    17.2M        -           ba2b418f427c  4d2fff5300c2  
crash.omnisky          omnisky               running (4m)     4m ago  40m    13.5M        -  17.2.5   cc65afd6173a  a3d185468b46  
grafana.omnisky        omnisky  *:3000       running (4m)     4m ago  40m    50.0M        -  8.3.5    dad864ee21e9  6f8409ec6692  
mgr.omnisky.bvogdw     omnisky  *:9283       running (4m)     4m ago  41m     468M        -  17.2.5   cc65afd6173a  b08b8fdf5cc9  
mon.omnisky            omnisky               running (4m)     4m ago  41m    25.0M    2048M  17.2.5   cc65afd6173a  68caf6c67090  
node-exporter.omnisky  omnisky  *:9100       running (4m)     4m ago  40m    5736k        -           1dbe0e931976  1b372159b1d4  
prometheus.omnisky     omnisky  *:9095       running (4m)     4m ago  40m    41.9M        -           514e6a882f6e  64a1f7963e87  
root@omnisky:~# docker ps
CONTAINER ID   IMAGE                                     COMMAND                  CREATED          STATUS         PORTS                                       NAMES
03e9a7d3d9da   192.168.207.34:5000/ceph                  "/usr/sbin/ceph-volu…"   1 second ago     Up 1 second                                                quirky_bardeen
b08b8fdf5cc9   192.168.207.34:5000/ceph:v17              "/usr/bin/ceph-mgr -…"   4 minutes ago    Up 4 minutes                                               ceph-7286e65a-8bfa-11ed-974b-dba0548c492a-mgr-omnisky-bvogdw
6f8409ec6692   quay.io/ceph/ceph-grafana:8.3.5           "/bin/sh -c 'grafana…"   4 minutes ago    Up 4 minutes                                               ceph-7286e65a-8bfa-11ed-974b-dba0548c492a-grafana-omnisky
a3d185468b46   192.168.207.34:5000/ceph                  "/usr/bin/ceph-crash…"   4 minutes ago    Up 4 minutes                                               ceph-7286e65a-8bfa-11ed-974b-dba0548c492a-crash-omnisky
4d2fff5300c2   quay.io/prometheus/alertmanager:v0.23.0   "/bin/alertmanager -…"   4 minutes ago    Up 4 minutes                                               ceph-7286e65a-8bfa-11ed-974b-dba0548c492a-alertmanager-omnisky
68caf6c67090   192.168.207.34:5000/ceph:v17              "/usr/bin/ceph-mon -…"   4 minutes ago    Up 4 minutes                                               ceph-7286e65a-8bfa-11ed-974b-dba0548c492a-mon-omnisky
1b372159b1d4   quay.io/prometheus/node-exporter:v1.3.1   "/bin/node_exporter …"   4 minutes ago    Up 4 minutes                                               ceph-7286e65a-8bfa-11ed-974b-dba0548c492a-node-exporter-omnisky
64a1f7963e87   quay.io/prometheus/prometheus:v2.33.4     "/bin/prometheus --c…"   4 minutes ago    Up 4 minutes                                               ceph-7286e65a-8bfa-11ed-974b-dba0548c492a-prometheus-omnisky
0b2cce9e9e69   registry:latest                           "/entrypoint.sh /etc…"   53 minutes ago   Up 4 minutes   0.0.0.0:5000->5000/tcp, :::5000->5000/tcp   registry
root@omnisky:~# ceph orch device ls
HOST     PATH      TYPE  DEVICE ID                           SIZE  AVAILABLE  REFRESHED  REJECT REASONS                                                 
omnisky  /dev/sdb  ssd   KINGSTON_SA400S3_50026B7381597160   240G  Yes        3s ago                                                                    
omnisky  /dev/sdc  ssd   KINGSTON_SA400S3_50026B7381555818   120G             3s ago     Insufficient space (<10 extents) on vgs, LVM detected, locked  
omnisky  /dev/sdd  ssd   KINGSTON_SA400S3_50026B73815555D3   120G  Yes        3s ago                                                                    
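The reject reasons on /dev/sdc ("Insufficient space (<10 extents) on vgs, LVM detected, locked") mean the disk still carries a Ceph LVM volume from an earlier OSD, which is also visible in the lsblk output above. A minimal sketch of how the leftover volume can be inspected before zapping (these diagnostic commands are not part of the original session):

# LVM physical/logical volumes still present on the host
pvs
lvs -o lv_name,vg_name,devices
# OSD metadata that ceph-volume still knows about, run through the cephadm shell
cephadm shell -- ceph-volume lvm list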
root@omnisky:~# ceph osd df
ID  CLASS  WEIGHT  REWEIGHT  SIZE  RAW USE  DATA  OMAP  META  AVAIL  %USE  VAR  PGS  STATUS
                      TOTAL   0 B      0 B   0 B   0 B   0 B    0 B     0                  
MIN/MAX VAR: -/-  STDDEV: 0

root@omnisky:~# hostname
omnisky
root@omnisky:~# cat /etc/hosts
127.0.0.1 localhost
127.0.1.1 omnisky

# The following lines are desirable for IPv6 capable hosts
::1     ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
root@omnisky:~# ceph orch device ls
HOST     PATH      TYPE  DEVICE ID                           SIZE  AVAILABLE  REFRESHED  REJECT REASONS                                                 
omnisky  /dev/sdb  ssd   KINGSTON_SA400S3_50026B7381597160   240G  Yes        2s ago                                                                    
omnisky  /dev/sdc  ssd   KINGSTON_SA400S3_50026B7381555818   120G             2s ago     Insufficient space (<10 extents) on vgs, LVM detected, locked  
omnisky  /dev/sdd  ssd   KINGSTON_SA400S3_50026B73815555D3   120G  Yes        2s ago                                                                    
root@omnisky:~# lsblk|grep ceph
└─ceph--7fd2e0a4--3e07--4b99--af47--a62d0aebd039-osd--block--67ac017a--0ce6--4138--8ab7--d319879076ed 253:0    0 111.8G  0 lvm  
root@omnisky:~# lsblk
NAME                                                                                                  MAJ:MIN RM   SIZE RO TYPE MOUNTPOINT
loop0                                                                                                   7:0    0    62M  1 loop /snap/core20/1611
loop1                                                                                                   7:1    0  67.8M  1 loop /snap/lxd/22753
loop2                                                                                                   7:2    0    47M  1 loop /snap/snapd/16292
sda                                                                                                     8:0    0 223.6G  0 disk 
├─sda1                                                                                                  8:1    0   1.1G  0 part /boot/efi
├─sda2                                                                                                  8:2    0     2G  0 part /boot
└─sda3                                                                                                  8:3    0 220.5G  0 part 
  └─ubuntu--vg-ubuntu--lv                                                                             253:1    0 220.5G  0 lvm  /
sdb                                                                                                     8:16   0 223.6G  0 disk 
sdc                                                                                                     8:32   0 111.8G  0 disk 
└─ceph--7fd2e0a4--3e07--4b99--af47--a62d0aebd039-osd--block--67ac017a--0ce6--4138--8ab7--d319879076ed 253:0    0 111.8G  0 lvm  
sdd                                                                                                     8:48   0 111.8G  0 disk 
root@omnisky:~# ceph orch device zap omnisky /dev/sdc
Error EINVAL: must pass --force to PERMANENTLY ERASE DEVICE DATA
root@omnisky:~# ceph orch device zap omnisky /dev/sdc --force
zap successful for /dev/sdc on omnisky
root@omnisky:~# lsblk
NAME                      MAJ:MIN RM   SIZE RO TYPE MOUNTPOINT
loop0                       7:0    0    62M  1 loop /snap/core20/1611
loop1                       7:1    0  67.8M  1 loop /snap/lxd/22753
loop2                       7:2    0    47M  1 loop /snap/snapd/16292
sda                         8:0    0 223.6G  0 disk 
├─sda1                      8:1    0   1.1G  0 part /boot/efi
├─sda2                      8:2    0     2G  0 part /boot
└─sda3                      8:3    0 220.5G  0 part 
  └─ubuntu--vg-ubuntu--lv 253:1    0 220.5G  0 lvm  /
sdb                         8:16   0 223.6G  0 disk 
sdc                         8:32   0 111.8G  0 disk 
sdd                         8:48   0 111.8G  0 disk 
root@omnisky:~# ceph orch device ls
HOST     PATH      TYPE  DEVICE ID                           SIZE  AVAILABLE  REFRESHED  REJECT REASONS  
omnisky  /dev/sdb  ssd   KINGSTON_SA400S3_50026B7381597160   240G  Yes        6s ago                     
omnisky  /dev/sdc  ssd   KINGSTON_SA400S3_50026B7381555818   120G  Yes        6s ago                     
omnisky  /dev/sdd  ssd   KINGSTON_SA400S3_50026B73815555D3   120G  Yes        6s ago                     
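After the zap, all three devices report Available again. If ceph orch device zap were not usable, the same cleanup could be done by hand; a hedged sketch, using the VG name visible in the earlier lsblk output:

# remove the leftover Ceph volume group and wipe all on-disk signatures (destroys data on /dev/sdc)
vgremove -y ceph-7fd2e0a4-3e07-4b99-af47-a62d0aebd039
wipefs -a /dev/sdc
sgdisk --zap-all /dev/sdc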
root@omnisky:~# ceph orch device ls
HOST     PATH      TYPE  DEVICE ID                           SIZE  AVAILABLE  REFRESHED  REJECT REASONS  
omnisky  /dev/sdb  ssd   KINGSTON_SA400S3_50026B7381597160   240G  Yes        2s ago                     
omnisky  /dev/sdc  ssd   KINGSTON_SA400S3_50026B7381555818   120G  Yes        2s ago                     
omnisky  /dev/sdd  ssd   KINGSTON_SA400S3_50026B73815555D3   120G  Yes        2s ago                     
root@omnisky:~# ceph orch device ls
HOST     PATH      TYPE  DEVICE ID                           SIZE  AVAILABLE  REFRESHED  REJECT REASONS  
omnisky  /dev/sdb  ssd   KINGSTON_SA400S3_50026B7381597160   240G  Yes        4s ago                     
omnisky  /dev/sdc  ssd   KINGSTON_SA400S3_50026B7381555818   120G  Yes        4s ago                     
omnisky  /dev/sdd  ssd   KINGSTON_SA400S3_50026B73815555D3   120G  Yes        4s ago                     
root@omnisky:~# ceph orch device ls
HOST     PATH      TYPE  DEVICE ID                           SIZE  AVAILABLE  REFRESHED  REJECT REASONS  
omnisky  /dev/sdb  ssd   KINGSTON_SA400S3_50026B7381597160   240G  Yes        5s ago                     
omnisky  /dev/sdc  ssd   KINGSTON_SA400S3_50026B7381555818   120G  Yes        5s ago                     
omnisky  /dev/sdd  ssd   KINGSTON_SA400S3_50026B73815555D3   120G  Yes        5s ago                     
root@omnisky:~# top
top - 15:31:51 up 17 min,  2 users,  load average: 1.26, 0.90, 0.53
Tasks: 650 total,   1 running, 649 sleeping,   0 stopped,   0 zombie
%Cpu(s):  0.1 us,  0.4 sy,  0.0 ni, 99.5 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
MiB Mem : 128574.1 total, 125699.6 free,   1886.6 used,    987.9 buff/cache
MiB Swap:   8192.0 total,   8192.0 free,      0.0 used. 125770.2 avail Mem 

    PID USER      PR  NI    VIRT    RES    SHR S  %CPU  %MEM     TIME+ COMMAND                                                                                                 
 249280 root      20   0   10052   4808   3472 R  11.8   0.0   0:00.03 top                                                                                                     
      1 root      20   0  168420  11920   8500 S   0.0   0.0   0:09.40 systemd                                                                                                 
      2 root      20   0       0      0      0 S   0.0   0.0   0:00.03 kthreadd                                                                                                
      3 root       0 -20       0      0      0 I   0.0   0.0   0:00.00 rcu_gp                                                                                                  
      4 root       0 -20       0      0      0 I   0.0   0.0   0:00.00 rcu_par_gp                                                                                              
      6 root       0 -20       0      0      0 I   0.0   0.0   0:00.00 kworker/0:0H-events_highpri                                                                             
      8 root       0 -20       0      0      0 I   0.0   0.0   0:00.01 kworker/0:1H-kblockd                                                                                    
     10 root       0 -20       0      0      0 I   0.0   0.0   0:00.00 mm_percpu_wq                                                                                            
     11 root      20   0       0      0      0 S   0.0   0.0   0:00.02 ksoftirqd/0                                                                                             
     12 root      20   0       0      0      0 I   0.0   0.0   0:01.36 rcu_sched                                                                                               
     13 root      rt   0       0      0      0 S   0.0   0.0   0:00.00 migration/0                                                                                             
     14 root     -51   0       0      0      0 S   0.0   0.0   0:00.00 idle_inject/0                                                                                           
     16 root      20   0       0      0      0 S   0.0   0.0   0:00.00 cpuhp/0                                                                                                 
     17 root      20   0       0      0      0 S   0.0   0.0   0:00.00 cpuhp/1                                                                                                 
     18 root     -51   0       0      0      0 S   0.0   0.0   0:00.00 idle_inject/1                                                                                           
     19 root      rt   0       0      0      0 S   0.0   0.0   0:01.05 migration/1                                                                                             
     20 root      20   0       0      0      0 S   0.0   0.0   0:00.01 ksoftirqd/1                                                                                             
     22 root       0 -20       0      0      0 I   0.0   0.0   0:00.00 kworker/1:0H-kblockd                                                                                    
     23 root      20   0       0      0      0 S   0.0   0.0   0:00.00 cpuhp/2                                                                                                 
     24 root     -51   0       0      0      0 S   0.0   0.0   0:00.00 idle_inject/2                                                                                           
     25 root      rt   0       0      0      0 S   0.0   0.0   0:01.06 migration/2                                                                                             
     26 root      20   0       0      0      0 S   0.0   0.0   0:00.01 ksoftirqd/2                                                                                             
     28 root       0 -20       0      0      0 I   0.0   0.0   0:00.00 kworker/2:0H-kblockd                                                                                    
     29 root      20   0       0      0      0 S   0.0   0.0   0:00.00 cpuhp/3                                                                                                 
     30 root     -51   0       0      0      0 S   0.0   0.0   0:00.00 idle_inject/3                                                                                           
     31 root      rt   0       0      0      0 S   0.0   0.0   0:01.06 migration/3                                                                                             
     32 root      20   0       0      0      0 S   0.0   0.0   0:00.01 ksoftirqd/3                                                                                             
     34 root       0 -20       0      0      0 I   0.0   0.0   0:00.00 kworker/3:0H-kblockd                                                                                    
     35 root      20   0       0      0      0 S   0.0   0.0   0:00.00 cpuhp/4                                                                                                 
     36 root     -51   0       0      0      0 S   0.0   0.0   0:00.00 idle_inject/4                                                                                           
     37 root      rt   0       0      0      0 S   0.0   0.0   0:01.07 migration/4                                                                                             
     38 root      20   0       0      0      0 S   0.0   0.0   0:00.00 ksoftirqd/4                                                                                             
     40 root       0 -20       0      0      0 I   0.0   0.0   0:00.00 kworker/4:0H-kblockd                                                                                    
     41 root      20   0       0      0      0 S   0.0   0.0   0:00.00 cpuhp/5                                                                                                 
     42 root     -51   0       0      0      0 S   0.0   0.0   0:00.00 idle_inject/5                                                                                           
     43 root      rt   0       0      0      0 S   0.0   0.0   0:01.08 migration/5                                                                                             
     44 root      20   0       0      0      0 S   0.0   0.0   0:00.01 ksoftirqd/5                                                                                             
     46 root       0 -20       0      0      0 I   0.0   0.0   0:00.00 kworker/5:0H-kblockd                                                                                    
     47 root      20   0       0      0      0 S   0.0   0.0   0:00.00 cpuhp/6                                                                                                 
     48 root     -51   0       0      0      0 S   0.0   0.0   0:00.00 idle_inject/6                                                                                           
     49 root      rt   0       0      0      0 S   0.0   0.0   0:01.08 migration/6                                                                                             
     50 root      20   0       0      0      0 S   0.0   0.0   0:00.00 ksoftirqd/6                                                                                             
     52 root       0 -20       0      0      0 I   0.0   0.0   0:00.00 kworker/6:0H-kblockd                                                                                    
     53 root      20   0       0      0      0 S   0.0   0.0   0:00.00 cpuhp/7                                                                                                 

root@omnisky:~# lsblk
NAME                      MAJ:MIN RM   SIZE RO TYPE MOUNTPOINT
loop0                       7:0    0    62M  1 loop /snap/core20/1611
loop1                       7:1    0  67.8M  1 loop /snap/lxd/22753
loop2                       7:2    0    47M  1 loop /snap/snapd/16292
sda                         8:0    0 223.6G  0 disk 
├─sda1                      8:1    0   1.1G  0 part /boot/efi
├─sda2                      8:2    0     2G  0 part /boot
└─sda3                      8:3    0 220.5G  0 part 
  └─ubuntu--vg-ubuntu--lv 253:1    0 220.5G  0 lvm  /
sdb                         8:16   0 223.6G  0 disk 
sdc                         8:32   0 111.8G  0 disk 
sdd                         8:48   0 111.8G  0 disk 
root@omnisky:~# lsblk
NAME                      MAJ:MIN RM   SIZE RO TYPE MOUNTPOINT
loop0                       7:0    0    62M  1 loop /snap/core20/1611
loop1                       7:1    0  67.8M  1 loop /snap/lxd/22753
loop2                       7:2    0    47M  1 loop /snap/snapd/16292
sda                         8:0    0 223.6G  0 disk 
├─sda1                      8:1    0   1.1G  0 part /boot/efi
├─sda2                      8:2    0     2G  0 part /boot
└─sda3                      8:3    0 220.5G  0 part 
  └─ubuntu--vg-ubuntu--lv 253:1    0 220.5G  0 lvm  /
sdb                         8:16   0 223.6G  0 disk 
sdc                         8:32   0 111.8G  0 disk 
sdd                         8:48   0 111.8G  0 disk 
root@omnisky:~# ceph orch device ls
HOST     PATH      TYPE  DEVICE ID                           SIZE  AVAILABLE  REFRESHED  REJECT REASONS  
omnisky  /dev/sdb  ssd   KINGSTON_SA400S3_50026B7381597160   240G  Yes        10s ago                    
omnisky  /dev/sdc  ssd   KINGSTON_SA400S3_50026B7381555818   120G  Yes        10s ago                    
omnisky  /dev/sdd  ssd   KINGSTON_SA400S3_50026B73815555D3   120G  Yes        10s ago                    
root@omnisky:~# lsblk
NAME                                                                                                  MAJ:MIN RM   SIZE RO TYPE MOUNTPOINT
loop0                                                                                                   7:0    0    62M  1 loop /snap/core20/1611
loop1                                                                                                   7:1    0  67.8M  1 loop /snap/lxd/22753
loop2                                                                                                   7:2    0    47M  1 loop /snap/snapd/16292
sda                                                                                                     8:0    0 223.6G  0 disk 
├─sda1                                                                                                  8:1    0   1.1G  0 part /boot/efi
├─sda2                                                                                                  8:2    0     2G  0 part /boot
└─sda3                                                                                                  8:3    0 220.5G  0 part 
  └─ubuntu--vg-ubuntu--lv                                                                             253:1    0 220.5G  0 lvm  /
sdb                                                                                                     8:16   0 223.6G  0 disk 
└─ceph--5a1a8d5a--48df--4f3f--9da5--dc7eb354f245-osd--block--4e239b92--954a--4486--ade1--cf97cc427359 253:0    0 223.6G  0 lvm  
sdc                                                                                                     8:32   0 111.8G  0 disk 
└─ceph--38cd21fb--7190--4963--8a7b--1a8cde41168e-osd--block--621737b7--5e68--4b65--b948--ad59e13ba134 253:2    0 111.8G  0 lvm  
sdd                                                                                                     8:48   0 111.8G  0 disk 
root@omnisky:~# lsblk
NAME                                                                                                  MAJ:MIN RM   SIZE RO TYPE MOUNTPOINT
loop0                                                                                                   7:0    0    62M  1 loop /snap/core20/1611
loop1                                                                                                   7:1    0  67.8M  1 loop /snap/lxd/22753
loop2                                                                                                   7:2    0    47M  1 loop /snap/snapd/16292
sda                                                                                                     8:0    0 223.6G  0 disk 
├─sda1                                                                                                  8:1    0   1.1G  0 part /boot/efi
├─sda2                                                                                                  8:2    0     2G  0 part /boot
└─sda3                                                                                                  8:3    0 220.5G  0 part 
  └─ubuntu--vg-ubuntu--lv                                                                             253:1    0 220.5G  0 lvm  /
sdb                                                                                                     8:16   0 223.6G  0 disk 
└─ceph--5a1a8d5a--48df--4f3f--9da5--dc7eb354f245-osd--block--4e239b92--954a--4486--ade1--cf97cc427359 253:0    0 223.6G  0 lvm  
sdc                                                                                                     8:32   0 111.8G  0 disk 
└─ceph--38cd21fb--7190--4963--8a7b--1a8cde41168e-osd--block--621737b7--5e68--4b65--b948--ad59e13ba134 253:2    0 111.8G  0 lvm  
sdd                                                                                                     8:48   0 111.8G  0 disk 
└─ceph--7feb4b0b--4b45--41dc--a2f7--6db878c716ba-osd--block--40243770--191c--44b7--981a--70d20d2cd191 253:3    0 111.8G  0 lvm  
root@omnisky:~# lsblk
NAME                                                                                                  MAJ:MIN RM   SIZE RO TYPE MOUNTPOINT
loop0                                                                                                   7:0    0    62M  1 loop /snap/core20/1611
loop1                                                                                                   7:1    0  67.8M  1 loop /snap/lxd/22753
loop2                                                                                                   7:2    0    47M  1 loop /snap/snapd/16292
sda                                                                                                     8:0    0 223.6G  0 disk 
├─sda1                                                                                                  8:1    0   1.1G  0 part /boot/efi
├─sda2                                                                                                  8:2    0     2G  0 part /boot
└─sda3                                                                                                  8:3    0 220.5G  0 part 
  └─ubuntu--vg-ubuntu--lv                                                                             253:1    0 220.5G  0 lvm  /
sdb                                                                                                     8:16   0 223.6G  0 disk 
└─ceph--5a1a8d5a--48df--4f3f--9da5--dc7eb354f245-osd--block--4e239b92--954a--4486--ade1--cf97cc427359 253:0    0 223.6G  0 lvm  
sdc                                                                                                     8:32   0 111.8G  0 disk 
└─ceph--38cd21fb--7190--4963--8a7b--1a8cde41168e-osd--block--621737b7--5e68--4b65--b948--ad59e13ba134 253:2    0 111.8G  0 lvm  
sdd                                                                                                     8:48   0 111.8G  0 disk 
└─ceph--7feb4b0b--4b45--41dc--a2f7--6db878c716ba-osd--block--40243770--191c--44b7--981a--70d20d2cd191 253:3    0 111.8G  0 lvm  
root@omnisky:~# ceph orch device ls
HOST     PATH      TYPE  DEVICE ID                           SIZE  AVAILABLE  REFRESHED  REJECT REASONS  
omnisky  /dev/sdb  ssd   KINGSTON_SA400S3_50026B7381597160   240G  Yes        25s ago                    
omnisky  /dev/sdc  ssd   KINGSTON_SA400S3_50026B7381555818   120G  Yes        25s ago                    
omnisky  /dev/sdd  ssd   KINGSTON_SA400S3_50026B73815555D3   120G  Yes        25s ago                    
root@omnisky:~# ceph orch device ls
HOST     PATH      TYPE  DEVICE ID                           SIZE  AVAILABLE  REFRESHED  REJECT REASONS  
omnisky  /dev/sdb  ssd   KINGSTON_SA400S3_50026B7381597160   240G  Yes        28s ago                    
omnisky  /dev/sdc  ssd   KINGSTON_SA400S3_50026B7381555818   120G  Yes        28s ago                    
omnisky  /dev/sdd  ssd   KINGSTON_SA400S3_50026B73815555D3   120G  Yes        28s ago                    
root@omnisky:~# ceph orch device ls
HOST     PATH      TYPE  DEVICE ID                           SIZE  AVAILABLE  REFRESHED  REJECT REASONS                                                 
omnisky  /dev/sdb  ssd   KINGSTON_SA400S3_50026B7381597160   240G             15s ago    Insufficient space (<10 extents) on vgs, LVM detected, locked  
omnisky  /dev/sdc  ssd   KINGSTON_SA400S3_50026B7381555818   120G             15s ago    Insufficient space (<10 extents) on vgs, LVM detected, locked  
omnisky  /dev/sdd  ssd   KINGSTON_SA400S3_50026B73815555D3   120G             15s ago    Insufficient space (<10 extents) on vgs, LVM detected, locked  
root@omnisky:~# lsblk
NAME                                                                                                  MAJ:MIN RM   SIZE RO TYPE MOUNTPOINT
loop0                                                                                                   7:0    0    62M  1 loop /snap/core20/1611
loop1                                                                                                   7:1    0  67.8M  1 loop /snap/lxd/22753
loop2                                                                                                   7:2    0    47M  1 loop /snap/snapd/16292
sda                                                                                                     8:0    0 223.6G  0 disk 
├─sda1                                                                                                  8:1    0   1.1G  0 part /boot/efi
├─sda2                                                                                                  8:2    0     2G  0 part /boot
└─sda3                                                                                                  8:3    0 220.5G  0 part 
  └─ubuntu--vg-ubuntu--lv                                                                             253:1    0 220.5G  0 lvm  /
sdb                                                                                                     8:16   0 223.6G  0 disk 
└─ceph--5a1a8d5a--48df--4f3f--9da5--dc7eb354f245-osd--block--4e239b92--954a--4486--ade1--cf97cc427359 253:0    0 223.6G  0 lvm  
sdc                                                                                                     8:32   0 111.8G  0 disk 
└─ceph--38cd21fb--7190--4963--8a7b--1a8cde41168e-osd--block--621737b7--5e68--4b65--b948--ad59e13ba134 253:2    0 111.8G  0 lvm  
sdd                                                                                                     8:48   0 111.8G  0 disk 
└─ceph--7feb4b0b--4b45--41dc--a2f7--6db878c716ba-osd--block--40243770--191c--44b7--981a--70d20d2cd191 253:3    0 111.8G  0 lvm  
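Between the device zap and this point the three OSDs were created: lsblk now shows a Ceph LVM volume on sdb, sdc and sdd, and ceph orch device ls reports the disks as locked. The session does not show the command that triggered this; with cephadm it is typically done by applying an OSD service spec over all free disks (an assumption here, not taken from the log):

# have cephadm consume every available, unused disk as a BlueStore OSD
ceph orch apply osd --all-available-devices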
root@omnisky:~# ceph -s
  cluster:
    id:     7286e65a-8bfa-11ed-974b-dba0548c492a
    health: HEALTH_WARN
            Reduced data availability: 1 pg inactive
 
  services:
    mon: 1 daemons, quorum omnisky (age 19m)
    mgr: omnisky.bvogdw(active, since 19m)
    osd: 3 osds: 3 up (since 63s), 3 in (since 89s)
 
  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   23 MiB used, 447 GiB / 447 GiB avail
    pgs:     100.000% pgs not active
             1 undersized+peered
 
root@omnisky:~# ceph -s
  cluster:
    id:     7286e65a-8bfa-11ed-974b-dba0548c492a
    health: HEALTH_WARN
            Reduced data availability: 1 pg inactive
            Degraded data redundancy: 1 pg undersized
 
  services:
    mon: 1 daemons, quorum omnisky (age 19m)
    mgr: omnisky.bvogdw(active, since 19m)
    osd: 3 osds: 3 up (since 67s), 3 in (since 93s)
 
  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   23 MiB used, 447 GiB / 447 GiB avail
    pgs:     100.000% pgs not active
             1 undersized+peered
 
root@omnisky:~# ceph -s
  cluster:
    id:     7286e65a-8bfa-11ed-974b-dba0548c492a
    health: HEALTH_WARN
            Reduced data availability: 1 pg inactive
            Degraded data redundancy: 1 pg undersized
 
  services:
    mon: 1 daemons, quorum omnisky (age 19m)
    mgr: omnisky.bvogdw(active, since 19m)
    osd: 3 osds: 3 up (since 70s), 3 in (since 96s)
 
  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   23 MiB used, 447 GiB / 447 GiB avail
    pgs:     100.000% pgs not active
             1 undersized+peered
 
root@omnisky:~# ceph -s
  cluster:
    id:     7286e65a-8bfa-11ed-974b-dba0548c492a
    health: HEALTH_WARN
            Reduced data availability: 1 pg inactive
            Degraded data redundancy: 1 pg undersized
 
  services:
    mon: 1 daemons, quorum omnisky (age 19m)
    mgr: omnisky.bvogdw(active, since 19m)
    osd: 3 osds: 3 up (since 71s), 3 in (since 97s)
 
  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   23 MiB used, 447 GiB / 447 GiB avail
    pgs:     100.000% pgs not active
             1 undersized+peered
 
root@omnisky:~# ceph -s
  cluster:
    id:     7286e65a-8bfa-11ed-974b-dba0548c492a
    health: HEALTH_WARN
            Reduced data availability: 1 pg inactive
            Degraded data redundancy: 1 pg undersized
 
  services:
    mon: 1 daemons, quorum omnisky (age 19m)
    mgr: omnisky.bvogdw(active, since 19m)
    osd: 3 osds: 3 up (since 72s), 3 in (since 98s)
 
  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   23 MiB used, 447 GiB / 447 GiB avail
    pgs:     100.000% pgs not active
             1 undersized+peered
 
root@omnisky:~# ceph -s
  cluster:
    id:     7286e65a-8bfa-11ed-974b-dba0548c492a
    health: HEALTH_WARN
            Reduced data availability: 1 pg inactive
            Degraded data redundancy: 1 pg undersized
 
  services:
    mon: 1 daemons, quorum omnisky (age 20m)
    mgr: omnisky.bvogdw(active, since 20m)
    osd: 3 osds: 3 up (since 102s), 3 in (since 2m)
 
  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   23 MiB used, 447 GiB / 447 GiB avail
    pgs:     100.000% pgs not active
             1 undersized+peered
 
root@omnisky:~# ceph health
HEALTH_WARN Reduced data availability: 1 pg inactive; Degraded data redundancy: 1 pg undersized
root@omnisky:~# ceph health --detail
--detail not valid:  --detail not in detail
Invalid command: unused arguments: ['--detail']
health [<detail:detail>] :  show cluster health
Error EINVAL: invalid command
root@omnisky:~# ceph health detail
HEALTH_WARN Reduced data availability: 1 pg inactive; Degraded data redundancy: 1 pg undersized
[WRN] PG_AVAILABILITY: Reduced data availability: 1 pg inactive
    pg 1.0 is stuck inactive for 2m, current state undersized+peered, last acting [1]
[WRN] PG_DEGRADED: Degraded data redundancy: 1 pg undersized
    pg 1.0 is stuck undersized for 119s, current state undersized+peered, last acting [1]
root@omnisky:~# ceph osd status
ID  HOST      USED  AVAIL  WR OPS  WR DATA  RD OPS  RD DATA  STATE      
 0  omnisky  7720k   223G      0        0       0        0   exists,up  
 1  omnisky  7720k   111G      0        0       0        0   exists,up  
 2  omnisky  7656k   111G      0        0       0        0   exists,up  
root@omnisky:~# ceph osd tree
ID  CLASS  WEIGHT   TYPE NAME         STATUS  REWEIGHT  PRI-AFF
-1         0.43668  root default                               
-3         0.43668      host omnisky                           
 0    ssd  0.21829          osd.0         up   1.00000  1.00000
 1    ssd  0.10919          osd.1         up   1.00000  1.00000
 2    ssd  0.10919          osd.2         up   1.00000  1.00000
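ceph osd tree shows a single host carrying all three OSDs. With the default replicated rule (failure domain = host) and osd_pool_default_size 3, CRUSH can place only one replica, so the lone PG stays undersized+peered and therefore inactive. A hedged sketch of how to confirm this (pool 1 is most likely the mgr's internal .mgr pool; read the real name from the output rather than assuming it):

# pool size/min_size and the CRUSH rule each pool uses
ceph osd pool ls detail
# the rule definition: "chooseleaf ... type host" needs three separate hosts for size 3
ceph osd crush rule dump replicated_rule
# where CRUSH currently maps pg 1.0
ceph pg map 1.0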
root@omnisky:~# ceph pg 1.0 query
{
    "snap_trimq": "[]",
    "snap_trimq_len": 0,
    "state": "undersized+peered",
    "epoch": 19,
    "up": [
        1
    ],
    "acting": [
        1
    ],
    "acting_recovery_backfill": [
        "1"
    ],
    "info": {
        "pgid": "1.0",
        "last_update": "0'0",
        "last_complete": "0'0",
        "log_tail": "0'0",
        "last_user_version": 0,
        "last_backfill": "MAX",
        "purged_snaps": [],
        "history": {
            "epoch_created": 17,
            "epoch_pool_created": 17,
            "last_epoch_started": 0,
            "last_interval_started": 0,
            "last_epoch_clean": 0,
            "last_interval_clean": 0,
            "last_epoch_split": 0,
            "last_epoch_marked_full": 0,
            "same_up_since": 17,
            "same_interval_since": 17,
            "same_primary_since": 17,
            "last_scrub": "0'0",
            "last_scrub_stamp": "2023-01-04T07:34:46.127918+0000",
            "last_deep_scrub": "0'0",
            "last_deep_scrub_stamp": "2023-01-04T07:34:46.127918+0000",
            "last_clean_scrub_stamp": "2023-01-04T07:34:46.127918+0000",
            "prior_readable_until_ub": 0
        },
        "stats": {
            "version": "0'0",
            "reported_seq": 7,
            "reported_epoch": 19,
            "state": "undersized+peered",
            "last_fresh": "2023-01-04T07:34:50.412769+0000",
            "last_change": "2023-01-04T07:34:47.141812+0000",
            "last_active": "2023-01-04T07:34:46.127918+0000",
            "last_peered": "2023-01-04T07:34:50.412769+0000",
            "last_clean": "2023-01-04T07:34:46.127918+0000",
            "last_became_active": "0.000000",
            "last_became_peered": "2023-01-04T07:34:47.141812+0000",
            "last_unstale": "2023-01-04T07:34:50.412769+0000",
            "last_undegraded": "2023-01-04T07:34:50.412769+0000",
            "last_fullsized": "2023-01-04T07:34:47.140382+0000",
            "mapping_epoch": 17,
            "log_start": "0'0",
            "ondisk_log_start": "0'0",
            "created": 17,
            "last_epoch_clean": 0,
            "parent": "0.0",
            "parent_split_bits": 0,
            "last_scrub": "0'0",
            "last_scrub_stamp": "2023-01-04T07:34:46.127918+0000",
            "last_deep_scrub": "0'0",
            "last_deep_scrub_stamp": "2023-01-04T07:34:46.127918+0000",
            "last_clean_scrub_stamp": "2023-01-04T07:34:46.127918+0000",
            "objects_scrubbed": 0,
            "log_size": 0,
            "ondisk_log_size": 0,
            "stats_invalid": false,
            "dirty_stats_invalid": false,
            "omap_stats_invalid": false,
            "hitset_stats_invalid": false,
            "hitset_bytes_stats_invalid": false,
            "pin_stats_invalid": false,
            "manifest_stats_invalid": false,
            "snaptrimq_len": 0,
            "last_scrub_duration": 0,
            "scrub_schedule": "periodic scrub scheduled @ 2023-01-05T10:17:58.864527+0000",
            "scrub_duration": 0,
            "objects_trimmed": 0,
            "snaptrim_duration": 0,
            "stat_sum": {
                "num_bytes": 0,
                "num_objects": 0,
                "num_object_clones": 0,
                "num_object_copies": 0,
                "num_objects_missing_on_primary": 0,
                "num_objects_missing": 0,
                "num_objects_degraded": 0,
                "num_objects_misplaced": 0,
                "num_objects_unfound": 0,
                "num_objects_dirty": 0,
                "num_whiteouts": 0,
                "num_read": 0,
                "num_read_kb": 0,
                "num_write": 0,
                "num_write_kb": 0,
                "num_scrub_errors": 0,
                "num_shallow_scrub_errors": 0,
                "num_deep_scrub_errors": 0,
                "num_objects_recovered": 0,
                "num_bytes_recovered": 0,
                "num_keys_recovered": 0,
                "num_objects_omap": 0,
                "num_objects_hit_set_archive": 0,
                "num_bytes_hit_set_archive": 0,
                "num_flush": 0,
                "num_flush_kb": 0,
                "num_evict": 0,
                "num_evict_kb": 0,
                "num_promote": 0,
                "num_flush_mode_high": 0,
                "num_flush_mode_low": 0,
                "num_evict_mode_some": 0,
                "num_evict_mode_full": 0,
                "num_objects_pinned": 0,
                "num_legacy_snapsets": 0,
                "num_large_omap_objects": 0,
                "num_objects_manifest": 0,
                "num_omap_bytes": 0,
                "num_omap_keys": 0,
                "num_objects_repaired": 0
            },
            "up": [
                1
            ],
            "acting": [
                1
            ],
            "avail_no_missing": [
                "1"
            ],
            "object_location_counts": [],
            "blocked_by": [],
            "up_primary": 1,
            "acting_primary": 1,
            "purged_snaps": []
        },
        "empty": 1,
        "dne": 0,
        "incomplete": 0,
        "last_epoch_started": 0,
        "hit_set_history": {
            "current_last_update": "0'0",
            "history": []
        }
    },
    "peer_info": [],
    "recovery_state": [
        {
            "name": "Started/Primary/Active",
            "enter_time": "2023-01-04T07:34:47.140450+0000",
            "might_have_unfound": [],
            "recovery_progress": {
                "backfill_targets": [],
                "waiting_on_backfill": [],
                "last_backfill_started": "MIN",
                "backfill_info": {
                    "begin": "MIN",
                    "end": "MIN",
                    "objects": []
                },
                "peer_backfill_info": [],
                "backfills_in_flight": [],
                "recovering": [],
                "pg_backend": {
                    "pull_from_peer": [],
                    "pushing": []
                }
            }
        },
        {
            "name": "Started",
            "enter_time": "2023-01-04T07:34:46.138105+0000"
        }
    ],
    "agent_state": {}
}
root@omnisky:~# ceph health detail
HEALTH_WARN Reduced data availability: 1 pg inactive; Degraded data redundancy: 1 pg undersized
[WRN] PG_AVAILABILITY: Reduced data availability: 1 pg inactive
    pg 1.0 is stuck inactive for 4m, current state undersized+peered, last acting [1]
[WRN] PG_DEGRADED: Degraded data redundancy: 1 pg undersized
    pg 1.0 is stuck undersized for 4m, current state undersized+peered, last acting [1]
root@omnisky:~# ceph pg repair 1.0

instructing pg 1.0 on osd.1 to repair
root@omnisky:~# 
root@omnisky:~# ceph health detail
HEALTH_WARN Reduced data availability: 1 pg inactive; Degraded data redundancy: 1 pg undersized
[WRN] PG_AVAILABILITY: Reduced data availability: 1 pg inactive
    pg 1.0 is stuck inactive for 4m, current state undersized+peered, last acting [1]
[WRN] PG_DEGRADED: Degraded data redundancy: 1 pg undersized
    pg 1.0 is stuck undersized for 4m, current state undersized+peered, last acting [1]
root@omnisky:~# ceph health detail
HEALTH_WARN Reduced data availability: 1 pg inactive; Degraded data redundancy: 1 pg undersized
[WRN] PG_AVAILABILITY: Reduced data availability: 1 pg inactive
    pg 1.0 is stuck inactive for 4m, current state undersized+peered, last acting [1]
[WRN] PG_DEGRADED: Degraded data redundancy: 1 pg undersized
    pg 1.0 is stuck undersized for 4m, current state undersized+peered, last acting [1]
root@omnisky:~# ceph pg dump_stuck  inactive
PG_STAT  STATE              UP   UP_PRIMARY  ACTING  ACTING_PRIMARY
1.0      undersized+peered  [1]           1     [1]               1
ok
root@omnisky:~# ceph pg dump_stuck   unclean
PG_STAT  STATE              UP   UP_PRIMARY  ACTING  ACTING_PRIMARY
1.0      undersized+peered  [1]           1     [1]               1
ok
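Neither ceph pg repair (which targets scrub inconsistencies) nor ceph osd force-create-pg (which recreates a lost, empty PG) changes where CRUSH maps the PG, so the state stays undersized+peered, as the repair attempt above and the force-create-pg attempt below show. On a single node the root cause can be addressed either by relaxing the failure domain (the route taken later in this session) or by shrinking the pool size; a hedged sketch of the pool-size route, acceptable only for a throwaway test cluster:

# allow single-copy pools, then drop the pool to one replica
ceph config set global mon_allow_pool_size_one true
ceph osd pool set <pool> size 1 --yes-i-really-mean-it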
root@omnisky:~# 
root@omnisky:~# 
root@omnisky:~# ceph -s
  cluster:
    id:     7286e65a-8bfa-11ed-974b-dba0548c492a
    health: HEALTH_WARN
            Reduced data availability: 1 pg inactive
            Degraded data redundancy: 1 pg undersized
 
  services:
    mon: 1 daemons, quorum omnisky (age 23m)
    mgr: omnisky.bvogdw(active, since 23m)
    osd: 3 osds: 3 up (since 5m), 3 in (since 5m)
 
  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   23 MiB used, 447 GiB / 447 GiB avail
    pgs:     100.000% pgs not active
             1 undersized+peered
 
root@omnisky:~# ceph osd force-create-pg 1.0   --yes-i-really-mean-it


pg 1.0 now creating, ok
root@omnisky:~# 
root@omnisky:~# 
root@omnisky:~# ceph -s
  cluster:
    id:     7286e65a-8bfa-11ed-974b-dba0548c492a
    health: HEALTH_WARN
            Reduced data availability: 1 pg inactive
            Degraded data redundancy: 1 pg undersized
 
  services:
    mon: 1 daemons, quorum omnisky (age 24m)
    mgr: omnisky.bvogdw(active, since 24m)
    osd: 3 osds: 3 up (since 5m), 3 in (since 6m)
 
  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   23 MiB used, 447 GiB / 447 GiB avail
    pgs:     100.000% pgs not active
             1 undersized+peered
 
root@omnisky:~# ceph -s
  cluster:
    id:     7286e65a-8bfa-11ed-974b-dba0548c492a
    health: HEALTH_WARN
            Reduced data availability: 1 pg inactive
            Degraded data redundancy: 1 pg undersized
 
  services:
    mon: 1 daemons, quorum omnisky (age 24m)
    mgr: omnisky.bvogdw(active, since 24m)
    osd: 3 osds: 3 up (since 5m), 3 in (since 6m)
 
  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   23 MiB used, 447 GiB / 447 GiB avail
    pgs:     100.000% pgs not active
             1 undersized+peered
 
root@omnisky:~# ceph health detail
HEALTH_WARN Reduced data availability: 1 pg inactive; Degraded data redundancy: 1 pg undersized
[WRN] PG_AVAILABILITY: Reduced data availability: 1 pg inactive
    pg 1.0 is stuck inactive for 6m, current state undersized+peered, last acting [1]
[WRN] PG_DEGRADED: Degraded data redundancy: 1 pg undersized
    pg 1.0 is stuck undersized for 5m, current state undersized+peered, last acting [1]
root@omnisky:~# ceph health detail
HEALTH_WARN Reduced data availability: 1 pg inactive; Degraded data redundancy: 1 pg undersized
[WRN] PG_AVAILABILITY: Reduced data availability: 1 pg inactive
    pg 1.0 is stuck inactive for 7m, current state undersized+peered, last acting [1]
[WRN] PG_DEGRADED: Degraded data redundancy: 1 pg undersized
    pg 1.0 is stuck undersized for 7m, current state undersized+peered, last acting [1]
root@omnisky:~# ceph health detail
HEALTH_WARN Reduced data availability: 1 pg inactive; Degraded data redundancy: 1 pg undersized
[WRN] PG_AVAILABILITY: Reduced data availability: 1 pg inactive
    pg 1.0 is stuck inactive for 7m, current state undersized+peered, last acting [1]
[WRN] PG_DEGRADED: Degraded data redundancy: 1 pg undersized
    pg 1.0 is stuck undersized for 7m, current state undersized+peered, last acting [1]
root@omnisky:~# ceph -s
  cluster:
    id:     7286e65a-8bfa-11ed-974b-dba0548c492a
    health: HEALTH_WARN
            Reduced data availability: 1 pg inactive
            Degraded data redundancy: 1 pg undersized
 
  services:
    mon: 1 daemons, quorum omnisky (age 26m)
    mgr: omnisky.bvogdw(active, since 26m)
    osd: 3 osds: 3 up (since 8m), 3 in (since 8m)
 
  data:
    pools:   2 pools, 2 pgs
    objects: 0 objects, 0 B
    usage:   23 MiB used, 447 GiB / 447 GiB avail
    pgs:     100.000% pgs not active
             2 undersized+peered
 
root@omnisky:~# ceph -s
  cluster:
    id:     7286e65a-8bfa-11ed-974b-dba0548c492a
    health: HEALTH_WARN
            Reduced data availability: 33 pgs inactive
            Degraded data redundancy: 33 pgs undersized
 
  services:
    mon: 1 daemons, quorum omnisky (age 28m)
    mgr: omnisky.bvogdw(active, since 28m)
    osd: 3 osds: 3 up (since 9m), 3 in (since 10m)
 
  data:
    pools:   2 pools, 33 pgs
    objects: 0 objects, 0 B
    usage:   23 MiB used, 447 GiB / 447 GiB avail
    pgs:     100.000% pgs not active
             33 undersized+peered
 
  progress:
    Global Recovery Event (0s)
      [............................] 
 
root@omnisky:~# ceph -s
  cluster:
    id:     7286e65a-8bfa-11ed-974b-dba0548c492a
    health: HEALTH_WARN
            Reduced data availability: 33 pgs inactive
            Degraded data redundancy: 33 pgs undersized
 
  services:
    mon: 1 daemons, quorum omnisky (age 28m)
    mgr: omnisky.bvogdw(active, since 28m)
    osd: 3 osds: 3 up (since 10m), 3 in (since 10m)
 
  data:
    pools:   2 pools, 33 pgs
    objects: 0 objects, 0 B
    usage:   23 MiB used, 447 GiB / 447 GiB avail
    pgs:     100.000% pgs not active
             33 undersized+peered
 
  progress:
    Global Recovery Event (0s)
      [............................] 
 
root@omnisky:~# ceph -s
  cluster:
    id:     7286e65a-8bfa-11ed-974b-dba0548c492a
    health: HEALTH_WARN
            Reduced data availability: 33 pgs inactive
            Degraded data redundancy: 33 pgs undersized
 
  services:
    mon: 1 daemons, quorum omnisky (age 28m)
    mgr: omnisky.bvogdw(active, since 28m)
    osd: 3 osds: 3 up (since 10m), 3 in (since 10m)
 
  data:
    pools:   2 pools, 33 pgs
    objects: 0 objects, 0 B
    usage:   23 MiB used, 447 GiB / 447 GiB avail
    pgs:     100.000% pgs not active
             33 undersized+peered
 
  progress:
    Global Recovery Event (0s)
      [............................] 
 
root@omnisky:~# ceph -s
  cluster:
    id:     7286e65a-8bfa-11ed-974b-dba0548c492a
    health: HEALTH_WARN
            Reduced data availability: 33 pgs inactive
            Degraded data redundancy: 33 pgs undersized
 
  services:
    mon: 1 daemons, quorum omnisky (age 28m)
    mgr: omnisky.bvogdw(active, since 28m)
    osd: 3 osds: 3 up (since 10m), 3 in (since 10m)
 
  data:
    pools:   2 pools, 33 pgs
    objects: 0 objects, 0 B
    usage:   23 MiB used, 447 GiB / 447 GiB avail
    pgs:     100.000% pgs not active
             33 undersized+peered
 
  progress:
    Global Recovery Event (0s)
      [............................] 
 
root@omnisky:~# ceph osd tree
ID  CLASS  WEIGHT   TYPE NAME         STATUS  REWEIGHT  PRI-AFF
-1         0.43668  root default                               
-3         0.43668      host omnisky                           
 0    ssd  0.21829          osd.0         up   1.00000  1.00000
 1    ssd  0.10919          osd.1         up   1.00000  1.00000
 2    ssd  0.10919          osd.2         up   1.00000  1.00000
root@omnisky:~# ceph osd df
ID  CLASS  WEIGHT   REWEIGHT  SIZE     RAW USE  DATA     OMAP  META     AVAIL    %USE  VAR   PGS  STATUS
 0    ssd  0.21829   1.00000  224 GiB  7.8 MiB  216 KiB   0 B  7.6 MiB  224 GiB  0.00  0.68   32      up
 1    ssd  0.10919   1.00000  112 GiB  7.7 MiB  216 KiB   0 B  7.4 MiB  112 GiB  0.01  1.33    1      up
 2    ssd  0.10919   1.00000  112 GiB  7.6 MiB  216 KiB   0 B  7.4 MiB  112 GiB  0.01  1.32    0      up
                       TOTAL  447 GiB   23 MiB  648 KiB   0 B   22 MiB  447 GiB  0.01                   
MIN/MAX VAR: 0.68/1.33  STDDEV: 0.00
root@omnisky:~# cd /etc/ceph/
root@omnisky:/etc/ceph# ll
total 24
drwxr-xr-x   2 root root 4096 Jan  4 15:16 ./
drwxr-xr-x 110 root root 4096 Jan  4 14:32 ../
-rw-------   1 root root  151 Jan  4 15:16 ceph.client.admin.keyring
-rw-r--r--   1 root root  179 Jan  4 15:16 ceph.conf
-rw-r--r--   1 root root  595 Jan  4 14:39 ceph.pub
-rw-r--r--   1 root root   92 Oct 18 04:18 rbdmap
root@omnisky:/etc/ceph# ceph osd getcrushmap -o /etc/ceph/crushmap
7
root@omnisky:/etc/ceph# crushtool -d /etc/ceph/crushmap -o /etc/ceph/crushmap.txt

Command 'crushtool' not found, but can be installed with:

apt install ceph-base


root@omnisky:/etc/ceph# apt install ceph-base
Reading package lists... Done
Building dependency tree       
Reading state information... Done
The following additional packages will be installed:
  binutils binutils-common binutils-x86-64-linux-gnu ceph-fuse ceph-mds libbinutils libctf-nobfd0 libctf0 nvme-cli smartmontools
Suggested packages:
  binutils-doc gsmartcontrol smart-notifier mailx | mailutils
Recommended packages:
  btrfs-tools
The following NEW packages will be installed:
  binutils binutils-common binutils-x86-64-linux-gnu ceph-base ceph-fuse ceph-mds libbinutils libctf-nobfd0 libctf0 nvme-cli smartmontools
0 upgraded, 11 newly installed, 0 to remove and 5 not upgraded.
Need to get 0 B/12.9 MB of archives.
After this operation, 63.9 MB of additional disk space will be used.
Do you want to continue? [Y/n] Y
Get:1 file:/offline debs/ binutils-common 2.34-6ubuntu1.3 [207 kB]
Get:2 file:/offline debs/ libbinutils 2.34-6ubuntu1.3 [474 kB]
Get:3 file:/offline debs/ libctf-nobfd0 2.34-6ubuntu1.3 [47.4 kB]
Get:4 file:/offline debs/ libctf0 2.34-6ubuntu1.3 [46.6 kB]
Get:5 file:/offline debs/ binutils-x86-64-linux-gnu 2.34-6ubuntu1.3 [1,613 kB]
Get:6 file:/offline debs/ binutils 2.34-6ubuntu1.3 [3,380 B]
Get:7 file:/offline debs/ ceph-base 17.2.5-1focal [6,439 kB]
Get:8 file:/offline debs/ ceph-fuse 17.2.5-1focal [864 kB]
Get:9 file:/offline debs/ ceph-mds 17.2.5-1focal [2,387 kB]
Get:10 file:/offline debs/ nvme-cli 1.9-1ubuntu0.1 [283 kB]
Get:11 file:/offline debs/ smartmontools 7.1-1build1 [525 kB]
              
Selecting previously unselected package binutils-common:amd64.
(Reading database ... 73362 files and directories currently installed.)
Preparing to unpack .../00-binutils-common_2.34-6ubuntu1.3_amd64.deb ...
Unpacking binutils-common:amd64 (2.34-6ubuntu1.3) ...
Selecting previously unselected package libbinutils:amd64.
Preparing to unpack .../01-libbinutils_2.34-6ubuntu1.3_amd64.deb ...
Unpacking libbinutils:amd64 (2.34-6ubuntu1.3) ...
Selecting previously unselected package libctf-nobfd0:amd64.
Preparing to unpack .../02-libctf-nobfd0_2.34-6ubuntu1.3_amd64.deb ...
Unpacking libctf-nobfd0:amd64 (2.34-6ubuntu1.3) ...
Selecting previously unselected package libctf0:amd64.
Preparing to unpack .../03-libctf0_2.34-6ubuntu1.3_amd64.deb ...
Unpacking libctf0:amd64 (2.34-6ubuntu1.3) ...
Selecting previously unselected package binutils-x86-64-linux-gnu.
Preparing to unpack .../04-binutils-x86-64-linux-gnu_2.34-6ubuntu1.3_amd64.deb ...
Unpacking binutils-x86-64-linux-gnu (2.34-6ubuntu1.3) ...
Selecting previously unselected package binutils.
Preparing to unpack .../05-binutils_2.34-6ubuntu1.3_amd64.deb ...
Unpacking binutils (2.34-6ubuntu1.3) ...
Selecting previously unselected package ceph-base.
Preparing to unpack .../06-ceph-base_17.2.5-1focal_amd64.deb ...
Unpacking ceph-base (17.2.5-1focal) ...
Selecting previously unselected package ceph-fuse.
Preparing to unpack .../07-ceph-fuse_17.2.5-1focal_amd64.deb ...
Unpacking ceph-fuse (17.2.5-1focal) ...
Selecting previously unselected package ceph-mds.
Preparing to unpack .../08-ceph-mds_17.2.5-1focal_amd64.deb ...
Unpacking ceph-mds (17.2.5-1focal) ...
Selecting previously unselected package nvme-cli.
Preparing to unpack .../09-nvme-cli_1.9-1ubuntu0.1_amd64.deb ...
Unpacking nvme-cli (1.9-1ubuntu0.1) ...
Selecting previously unselected package smartmontools.
Preparing to unpack .../10-smartmontools_7.1-1build1_amd64.deb ...
Unpacking smartmontools (7.1-1build1) ...
Setting up smartmontools (7.1-1build1) ...
Created symlink /etc/systemd/system/smartd.service → /lib/systemd/system/smartmontools.service.
Created symlink /etc/systemd/system/multi-user.target.wants/smartmontools.service → /lib/systemd/system/smartmontools.service.
Setting up nvme-cli (1.9-1ubuntu0.1) ...
Setting up binutils-common:amd64 (2.34-6ubuntu1.3) ...
Setting up libctf-nobfd0:amd64 (2.34-6ubuntu1.3) ...
Setting up libbinutils:amd64 (2.34-6ubuntu1.3) ...
Setting up ceph-fuse (17.2.5-1focal) ...
Created symlink /etc/systemd/system/remote-fs.target.wants/ceph-fuse.target → /lib/systemd/system/ceph-fuse.target.
Created symlink /etc/systemd/system/ceph.target.wants/ceph-fuse.target → /lib/systemd/system/ceph-fuse.target.
Setting up libctf0:amd64 (2.34-6ubuntu1.3) ...
Setting up binutils-x86-64-linux-gnu (2.34-6ubuntu1.3) ...
Setting up binutils (2.34-6ubuntu1.3) ...
Setting up ceph-base (17.2.5-1focal) ...
Created symlink /etc/systemd/system/ceph.target.wants/ceph-crash.service → /lib/systemd/system/ceph-crash.service.
Setting up ceph-mds (17.2.5-1focal) ...
Created symlink /etc/systemd/system/multi-user.target.wants/ceph-mds.target → /lib/systemd/system/ceph-mds.target.
Created symlink /etc/systemd/system/ceph.target.wants/ceph-mds.target → /lib/systemd/system/ceph-mds.target.
Processing triggers for libc-bin (2.31-0ubuntu9.9) ...
Processing triggers for systemd (245.4-4ubuntu3.17) ...
Processing triggers for man-db (2.9.1-1) ...

root@omnisky:/etc/ceph# crushtool -d /etc/ceph/crushmap -o /etc/ceph/crushmap.txt
root@omnisky:/etc/ceph# more crushmap.txt
# begin crush map
tunable choose_local_tries 0
tunable choose_local_fallback_tries 0
tunable choose_total_tries 50
tunable chooseleaf_descend_once 1
tunable chooseleaf_vary_r 1
tunable chooseleaf_stable 1
tunable straw_calc_version 1
tunable allowed_bucket_algs 54

# devices
device 0 osd.0 class ssd
device 1 osd.1 class ssd
device 2 osd.2 class ssd

# types
type 0 osd
type 1 host
type 2 chassis
type 3 rack
type 4 row
type 5 pdu
type 6 pod
type 7 room
type 8 datacenter
type 9 zone
type 10 region
type 11 root

# buckets
host omnisky {
        id -3           # do not change unnecessarily
        id -4 class ssd         # do not change unnecessarily
        # weight 0.43668
        alg straw2
        hash 0  # rjenkins1
        item osd.0 weight 0.21829
        item osd.1 weight 0.10919
        item osd.2 weight 0.10919
}
root default {
        id -1           # do not change unnecessarily
        id -2 class ssd         # do not change unnecessarily
        # weight 0.43668
        alg straw2
        hash 0  # rjenkins1
        item omnisky weight 0.43668
}

# rules
rule replicated_rule {
        id 0
        type replicated
        step take default
        step chooseleaf firstn 0 type host
        step emit
}

# end crush map
root@omnisky:/etc/ceph# grep 'step chooseleaf' /etc/ceph/crushmap.txt
        step chooseleaf firstn 0 type host
root@omnisky:/etc/ceph# sed -i 's/step chooseleaf firstn 0 type host/step chooseleaf firstn 0 type osd/' /etc/ceph/crushmap.txt
root@omnisky:/etc/ceph# grep 'step chooseleaf' /etc/ceph/crushmap.txt
        step chooseleaf firstn 0 type osd
root@omnisky:/etc/ceph# crushtool -c /etc/ceph/crushmap.txt -o /etc/ceph/crushmap-new
root@omnisky:/etc/ceph# ceph osd setcrushmap -i /etc/ceph/crushmap-new
8
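Injecting the edited map takes effect immediately, as the next ceph -s shows. The same change can also be made without decompiling the CRUSH map, by creating a replicated rule whose failure domain is an individual OSD and pointing the pools at it; a hedged sketch (the rule name is illustrative):

# new replicated rule with an osd-level failure domain
ceph osd crush rule create-replicated replicated_osd default osd
# switch an existing pool to the new rule (repeat per pool)
ceph osd pool set <pool> crush_rule replicated_osd

Either way, all replicas of a PG may now land on the same host, so the data survives single-disk failures only; once more nodes join the cluster, the failure domain should be set back to host.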
root@omnisky:/etc/ceph# ceph -s
  cluster:
    id:     7286e65a-8bfa-11ed-974b-dba0548c492a
    health: HEALTH_WARN
            Reduced data availability: 1 pg inactive, 1 pg peering
 
  services:
    mon: 1 daemons, quorum omnisky (age 33m)
    mgr: omnisky.bvogdw(active, since 32m)
    osd: 3 osds: 3 up (since 14m), 3 in (since 14m)
 
  data:
    pools:   2 pools, 33 pgs
    objects: 0 objects, 0 B
    usage:   26 MiB used, 447 GiB / 447 GiB avail
    pgs:     3.030% pgs not active
             32 active+clean
             1  creating+peering
 
  progress:
    Global Recovery Event (0s)
      [............................] 
 
root@omnisky:/etc/ceph# ceph -s
  cluster:
    id:     7286e65a-8bfa-11ed-974b-dba0548c492a
    health: HEALTH_OK
 
  services:
    mon: 1 daemons, quorum omnisky (age 33m)
    mgr: omnisky.bvogdw(active, since 33m)
    osd: 3 osds: 3 up (since 14m), 3 in (since 15m)
 
  data:
    pools:   2 pools, 33 pgs
    objects: 2 objects, 705 KiB
    usage:   28 MiB used, 447 GiB / 447 GiB avail
    pgs:     33 active+clean
 
root@omnisky:/etc/ceph# ceph -s
  cluster:
    id:     7286e65a-8bfa-11ed-974b-dba0548c492a
    health: HEALTH_OK
 
  services:
    mon: 1 daemons, quorum omnisky (age 33m)
    mgr: omnisky.bvogdw(active, since 33m)
    osd: 3 osds: 3 up (since 15m), 3 in (since 15m)
 
  data:
    pools:   2 pools, 33 pgs
    objects: 2 objects, 705 KiB
    usage:   28 MiB used, 447 GiB / 447 GiB avail
    pgs:     33 active+clean
 


Last modified: 2023-08-16 15:47:18