Continued from the previous post:
http://blog.chinaunix.net/uid-21142030-id-5194039.html
A disk partition was added to each OSD node before rerunning the experiment; the df output below shows the new 8 GB partition (/dev/vdb1) mounted at /osd on ceph_node1. A sketch of how such a partition can be prepared follows the output.
[root@ceph_node1 osd]# df
Filesystem              1K-blocks    Used Available Use% Mounted on
/dev/mapper/centos-root   7022592 6789812    232780  97% /
devtmpfs                   933432       0    933432   0% /dev
tmpfs                      942208       0    942208   0% /dev/shm
tmpfs                      942208   32268    909940   4% /run
tmpfs                      942208       0    942208   0% /sys/fs/cgroup
/dev/vda1                  508588  139920    368668  28% /boot
/dev/vdb1                 8377344   32928   8344416   1% /osd
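
For reference, a minimal sketch of how such a partition might be prepared on each OSD node, assuming the new disk is /dev/vdb and the mount point is /osd as in the df output above (device names will differ per environment):

# Create one partition spanning the new disk /dev/vdb
parted -s /dev/vdb mklabel gpt mkpart primary xfs 0% 100%
# Format it with XFS, a common choice for Ceph OSD data
mkfs.xfs /dev/vdb1
# Mount it at the OSD data directory and persist the mount
mkdir -p /osd
mount /dev/vdb1 /osd
echo '/dev/vdb1 /osd xfs defaults 0 0' >> /etc/fstab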
Remove the OSDs. This is only a demonstration; in a real production environment this operation must be done with care, since the OSDs hold data. A fuller removal sequence is sketched after the commands below.
[root@ceph_monitor ~]# ceph osd out osd.3
marked out osd.3.
[root@ceph_monitor ~]# ceph osd out osd.4
marked out osd.4.
[root@ceph_monitor ~]# ceph osd down osd.4
marked down osd.4.
[root@ceph_monitor ~]# ceph osd down osd.3
marked down osd.3.
[root@ceph_monitor ~]# ceph osd rm osd.3
osd.3 does not exist.
[root@ceph_monitor ~]# ceph osd rm osd.4
removed osd.4
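
(The "osd.3 does not exist" reply above simply means osd.3 had already been removed by an earlier ceph osd rm; the monitor log further down shows that first removal finishing at 13:25:51.)

The commands above only mark the OSDs out/down and delete them from the osdmap. For reference, the fuller removal sequence documented by Ceph also stops the daemon and removes the CRUSH entry and authentication key; a minimal sketch for osd.3 (the daemon-stop command depends on your init system):

ceph osd out osd.3            # stop placing data on the OSD; wait for rebalancing if the cluster holds data
service ceph stop osd.3       # on the OSD node; systemctl stop ceph-osd@3 on systemd-managed clusters
ceph osd crush remove osd.3   # remove the OSD from the CRUSH map
ceph auth del osd.3           # delete its cephx key
ceph osd rm osd.3             # remove it from the osdmap

Below is the ceph -w output captured on ceph_node2 while the commands above were being issued.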
[root@ceph_node2 osd]# ceph -w
    cluster f35a65ad-1a6a-4e8d-8f7e-cb5f113c0a02
     health HEALTH_ERR
            64 pgs stale
            64 pgs stuck stale
            2 full osd(s)
     monmap e1: 1 mons at {ceph_monitor=10.0.2.33:6789/0}
            election epoch 1, quorum 0 ceph_monitor
     osdmap e22: 5 osds: 2 up, 2 in
            flags full
      pgmap v1644: 64 pgs, 1 pools, 0 bytes data, 0 objects
            13260 MB used, 455 MB / 13716 MB avail
                  64 stale+active+clean
2015-09-14 13:21:39.089705 osd.4 [WRN] OSD near full (96%)
2015-09-14 13:21:49.988763 osd.3 [WRN] OSD near full (96%)
2015-09-14 13:22:00.864299 mon.0 [INF] from='client.? 10.0.2.33:0/1014270' entity='client.admin' cmd=[{"prefix": "osd out", "ids": ["osd.3"]}]: dispatch
2015-09-14 13:22:00.980026 mon.0 [INF] from='client.? 10.0.2.33:0/1014270' entity='client.admin' cmd='[{"prefix": "osd out", "ids": ["osd.3"]}]': finished
2015-09-14 13:22:01.043549 mon.0 [INF] osdmap e23: 5 osds: 2 up, 1 in full
2015-09-14 13:22:01.064547 mon.0 [INF] pgmap v1645: 64 pgs: 64 stale+active+clean; 0 bytes data, 6629 MB used, 228 MB / 6858 MB avail
2015-09-14 13:22:03.944095 mon.0 [INF] from='client.? 10.0.2.33:0/1014303' entity='client.admin' cmd=[{"prefix": "osd out", "ids": ["osd.4"]}]: dispatch
2015-09-14 13:22:04.058603 mon.0 [INF] from='client.? 10.0.2.33:0/1014303' entity='client.admin' cmd='[{"prefix": "osd out", "ids": ["osd.4"]}]': finished
2015-09-14 13:22:04.136343 mon.0 [INF] osdmap e24: 5 osds: 2 up, 0 in full
2015-09-14 13:22:04.209347 mon.0 [INF] pgmap v1646: 64 pgs: 64 stale+active+clean; 0 bytes data, 0 kB used, 0 kB / 0 kB avail
2015-09-14 13:22:04.496508 mon.0 [INF] osdmap e25: 5 osds: 2 up, 0 in
2015-09-14 13:22:04.514888 mon.0 [INF] pgmap v1647: 64 pgs: 64 stale+active+clean; 0 bytes data, 0 kB used, 0 kB / 0 kB avail
2015-09-14 13:22:11.113375 osd.4 [WRN] OSD near full (96%)
2015-09-14 13:22:19.412532 osd.3 [WRN] OSD near full (96%)
2015-09-14 13:22:41.918295 osd.4 [WRN] OSD near full (96%)
2015-09-14 13:22:51.015761 osd.3 [WRN] OSD near full (96%)
2015-09-14 13:23:12.023552 osd.4 [WRN] OSD near full (96%)
2015-09-14 13:23:15.296925 mon.0 [INF] pgmap v1648: 64 pgs: 64 stale+active+clean; 0 bytes data, 0 kB used, 0 kB / 0 kB avail
2015-09-14 13:23:19.301533 mon.0 [INF] pgmap v1649: 64 pgs: 64 stale+active+clean; 0 bytes data, 0 kB used, 0 kB / 0 kB avail
2015-09-14 13:23:22.419805 osd.3 [WRN] OSD near full (96%)
2015-09-14 13:23:46.828958 osd.4 [WRN] OSD near full (96%)
2015-09-14 13:23:54.523184 osd.3 [WRN] OSD near full (96%)
2015-09-14 13:24:17.832867 osd.4 [WRN] OSD near full (96%)
2015-09-14 13:24:27.627448 osd.3 [WRN] OSD near full (96%)
2015-09-14 13:24:51.037841 osd.4 [WRN] OSD near full (96%)
2015-09-14 13:24:57.930727 osd.3 [WRN] OSD near full (96%)
2015-09-14 13:25:15.338544 mon.0 [INF] pgmap v1650: 64 pgs: 64 stale+active+clean; 0 bytes data, 0 kB used, 0 kB / 0 kB avail
2015-09-14 13:25:19.336484 mon.0 [INF] pgmap v1651: 64 pgs: 64 stale+active+clean; 0 bytes data, 0 kB used, 0 kB / 0 kB avail
2015-09-14 13:25:23.142214 osd.4 [WRN] OSD near full (96%)
2015-09-14 13:25:27.034190 osd.3 [WRN] OSD near full (96%)
2015-09-14 13:25:40.654246 mon.0 [INF] from='client.? 10.0.2.33:0/1014387' entity='client.admin' cmd=[{"prefix": "osd down", "ids": ["osd.4"]}]: dispatch
2015-09-14 13:25:40.778345 mon.0 [INF] from='client.? 10.0.2.33:0/1014387' entity='client.admin' cmd='[{"prefix": "osd down", "ids": ["osd.4"]}]': finished
2015-09-14 13:25:40.801149 mon.0 [INF] osdmap e26: 5 osds: 1 up, 0 in
2015-09-14 13:25:40.820513 mon.0 [INF] pgmap v1652: 64 pgs: 64 stale+active+clean; 0 bytes data, 0 kB used, 0 kB / 0 kB avail
2015-09-14 13:25:42.980782 mon.0 [INF] osd.4 10.0.2.32:6800/4919 boot
2015-09-14 13:25:42.998345 mon.0 [INF] osdmap e27: 5 osds: 2 up, 0 in
2015-09-14 13:25:43.016972 mon.0 [INF] pgmap v1653: 64 pgs: 64 stale+active+clean; 0 bytes data, 0 kB used, 0 kB / 0 kB avail
2015-09-14 13:25:45.315515 mon.0 [INF] from='client.? 10.0.2.33:0/1014420' entity='client.admin' cmd=[{"prefix": "osd down", "ids": ["osd.3"]}]: dispatch
2015-09-14 13:25:44.430084 osd.4 [WRN] map e26 wrongly marked me down
2015-09-14 13:25:45.428227 mon.0 [INF] from='client.? 10.0.2.33:0/1014420' entity='client.admin' cmd='[{"prefix": "osd down", "ids": ["osd.3"]}]': finished
2015-09-14 13:25:45.445942 mon.0 [INF] osdmap e28: 5 osds: 1 up, 0 in
2015-09-14 13:25:45.464796 mon.0 [INF] pgmap v1654: 64 pgs: 64 stale+active+clean; 0 bytes data, 0 kB used, 0 kB / 0 kB avail
2015-09-14 13:25:51.637408 mon.0 [INF] from='client.? 10.0.2.33:0/1014453' entity='client.admin' cmd=[{"prefix": "osd rm", "ids": ["osd.3"]}]: dispatch
2015-09-14 13:25:51.741785 mon.0 [INF] from='client.? 10.0.2.33:0/1014453' entity='client.admin' cmd='[{"prefix": "osd rm", "ids": ["osd.3"]}]': finished
2015-09-14 13:25:51.852460 mon.0 [INF] osdmap e29: 4 osds: 1 up, 0 in
2015-09-14 13:25:51.875071 mon.0 [INF] pgmap v1655: 64 pgs: 64 stale+active+clean; 0 bytes data, 0 kB used, 0 kB / 0 kB avail
2015-09-14 13:25:54.707323 mon.0 [INF] from='client.? 10.0.2.33:0/1014486' entity='client.admin' cmd=[{"prefix": "osd rm", "ids": ["osd.4"]}]: dispatch
2015-09-14 13:25:53.957396 osd.4 [WRN] OSD near full (96%)
2015-09-14 13:25:57.937731 osd.3 [WRN] OSD near full (96%)
2015-09-14 13:26:01.313844 mon.0 [INF] from='client.? 10.0.2.33:0/1014519' entity='client.admin' cmd=[{"prefix": "osd down", "ids": ["osd.4"]}]: dispatch
2015-09-14 13:26:01.434547 mon.0 [INF] from='client.? 10.0.2.33:0/1014519' entity='client.admin' cmd='[{"prefix": "osd down", "ids": ["osd.4"]}]': finished
2015-09-14 13:26:01.452332 mon.0 [INF] osdmap e30: 4 osds: 0 up, 0 in
2015-09-14 13:26:01.477136 mon.0 [INF] pgmap v1656: 64 pgs: 64 stale+active+clean; 0 bytes data, 0 kB used, 0 kB / 0 kB avail
2015-09-14 13:26:04.414501 mon.0 [INF] from='client.? 10.0.2.33:0/1014552' entity='client.admin' cmd=[{"prefix": "osd rm", "ids": ["osd.3"]}]: dispatch
2015-09-14 13:26:07.547864 mon.0 [INF] from='client.? 10.0.2.33:0/1014585' entity='client.admin' cmd=[{"prefix": "osd rm", "ids": ["osd.4"]}]: dispatch
2015-09-14 13:26:07.675705 mon.0 [INF] from='client.? 10.0.2.33:0/1014585' entity='client.admin' cmd='[{"prefix": "osd rm", "ids": ["osd.4"]}]': finished
2015-09-14 13:26:07.693444 mon.0 [INF] osdmap e31: 3 osds: 0 up, 0 in
2015-09-14 13:26:07.718181 mon.0 [INF] pgmap v1657: 64 pgs: 64 stale+active+clean; 0 bytes data, 0 kB used, 0 kB / 0 kB avail
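
Note the "map e26 wrongly marked me down" warning above: ceph osd down only marks an OSD down in the osdmap, and a daemon that is still running reports back in and gets marked up again (the "osd.4 ... boot" line). That is why osd.4 had to be marked down a second time before ceph osd rm would succeed, since an OSD that is up cannot be removed from the osdmap.

Once the removals finish, the osdmap reports 3 osds: 0 up, 0 in. A quick hypothetical check of what remains (not part of the original transcript):

ceph osd tree   # list remaining OSD ids and their positions in the CRUSH map
ceph -s         # cluster summary; the osdmap line should now show 3 osds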