OpenStack and Ceph (pool management)


Goal

Manage the storage pools in the Ceph cluster: keep the rbd pool, create a volumes pool, delete the other pools, and increase the pg_num count.
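For reference, the PG count used below is close to what the commonly cited rule of thumb suggests (this calculation is an assumption added for context, not a figure from the original article):

# pg_num sizing sketch (rule of thumb: roughly 100 PGs per OSD, divided by the replica count)
# 70 OSDs * 100 / 3 replicas ≈ 2333 -> usually rounded up to a power of two (4096)
# this cluster simply uses 4000 when creating the volumes pool below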

List the current pools

[root@hh-yun-ceph-cinder015-128055 ~]# ceph osd  lspools
0 data,1 metadata,2 rbd,
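Before removing pools it can be worth confirming that they hold no data. A minimal check with standard Ceph commands (not part of the original session):

ceph df                        # per-pool object counts and usage
ceph osd dump | grep ^pool     # pool ids, replica sizes and pg counts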

Delete the data and metadata pools

[root@hh-yun-ceph-cinder015-128055 ~]# ceph osd pool delete metadata metadata --yes-i-really-really-mean-it
pool 'metadata' removed
[root@hh-yun-ceph-cinder015-128055 ~]# ceph osd pool delete data data --yes-i-really-really-mean-it
pool 'data' removed
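Note: on this release the --yes-i-really-really-mean-it flag is enough. On newer Ceph releases (Luminous and later) the monitors additionally refuse pool deletion unless mon_allow_pool_delete is enabled; a hedged sketch of enabling it at runtime before running the same delete command (not needed in the environment shown here):

ceph tell mon.* injectargs '--mon-allow-pool-delete=true'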

Create the volumes pool

[root@hh-yun-ceph-cinder015-128055 ~]# ceph osd pool create volumes 4000 4000
pool 'volumes' created
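Here the pool is created with the target PG count up front. If pg_num had to be raised on an existing pool instead (as the goal mentions), it would look roughly like the following; pgp_num must be raised to match before data actually rebalances (the value 4096 is illustrative only):

ceph osd pool set volumes pg_num 4096
ceph osd pool set volumes pgp_num 4096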

Check the current replica count of the volumes pool

[root@hh-yun-ceph-cinder015-128055 ~]# ceph osd dump | grep 'replicated size' | grep volumes
pool 4 'volumes' replicated size 2 min_size 1 crush_ruleset 0 object_hash rjenkins pg_num 4000 pgp_num 4000 last_change 232 flags hashpspool stripe_width 0

The current replica count is 2.
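The same values can also be read directly with ceph osd pool get, equivalent to grepping the ceph osd dump output above:

ceph osd pool get volumes size
ceph osd pool get volumes min_size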

Change the replica count to 3 and verify

[root@hh-yun-ceph-cinder015-128055 ~]# ceph osd pool set volumes size 3
set pool 4 size to 3

[root@hh-yun-ceph-cinder015-128055 ~]# ceph osd dump | grep 'replicated size' | grep volumes
pool 4 'volumes' replicated size 3 min_size 1 crush_ruleset 0 object_hash rjenkins pg_num 4000 pgp_num 4000 last_change 239 flags hashpspool stripe_width 0

Set the minimum replica count (min_size) to 2; the current value is 1. min_size is the minimum number of replicas that must be available for the pool to keep serving I/O.

[root@hh-yun-ceph-cinder015-128055 ~]# ceph osd pool set volumes min_size 2
set pool 4 min_size to 2
[root@hh-yun-ceph-cinder015-128055 ~]# ceph osd dump | grep 'replicated size' | grep volumes
pool 4 'volumes' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 4000 pgp_num 4000 last_change 241 flags hashpspool stripe_width 0

Current OSD layout for reference

[root@hh-yun-ceph-cinder017-128057 ~]# ceph osd  tree
# id    weight  type name       up/down reweight
-1      70      root default
-2      10              host hh-yun-ceph-cinder015-128055
0       1                       osd.0   up      1
1       1                       osd.1   up      1
2       1                       osd.2   up      1
3       1                       osd.3   up      1
4       1                       osd.4   up      1
5       1                       osd.5   up      1
6       1                       osd.6   up      1
7       1                       osd.7   up      1
8       1                       osd.8   up      1
9       1                       osd.9   up      1
-3      10              host hh-yun-ceph-cinder016-128056
10      1                       osd.10  up      1
11      1                       osd.11  up      1
12      1                       osd.12  up      1
13      1                       osd.13  up      1
14      1                       osd.14  up      1
15      1                       osd.15  up      1
16      1                       osd.16  up      1
17      1                       osd.17  up      1
18      1                       osd.18  up      1
19      1                       osd.19  up      1
-4      10              host hh-yun-ceph-cinder017-128057
20      1                       osd.20  up      1
21      1                       osd.21  up      1
22      1                       osd.22  up      1
23      1                       osd.23  up      1
24      1                       osd.24  up      1
25      1                       osd.25  up      1
26      1                       osd.26  up      1
27      1                       osd.27  up      1
28      1                       osd.28  up      1
29      1                       osd.29  up      1
-5      10              host hh-yun-ceph-cinder023-128073
30      1                       osd.30  up      1
31      1                       osd.31  up      1
32      1                       osd.32  up      1
33      1                       osd.33  up      1
34      1                       osd.34  up      1
35      1                       osd.35  up      1
36      1                       osd.36  up      1
37      1                       osd.37  up      1
38      1                       osd.38  up      1
39      1                       osd.39  up      1
-6      10              host hh-yun-ceph-cinder024-128074
40      1                       osd.40  up      1
41      1                       osd.41  up      1
42      1                       osd.42  up      1
43      1                       osd.43  up      1
44      1                       osd.44  up      1
45      1                       osd.45  up      1
46      1                       osd.46  up      1
47      1                       osd.47  up      1
48      1                       osd.48  up      1
49      1                       osd.49  up      1
-7      10              host hh-yun-ceph-cinder025-128075
50      1                       osd.50  up      1
51      1                       osd.51  up      1
52      1                       osd.52  up      1
53      1                       osd.53  up      1
54      1                       osd.54  up      1
55      1                       osd.55  up      1
56      1                       osd.56  up      1
57      1                       osd.57  up      1
58      1                       osd.58  up      1
59      1                       osd.59  up      1
-8      10              host hh-yun-ceph-cinder026-128076
60      1                       osd.60  up      1
61      1                       osd.61  up      1
62      1                       osd.62  up      1
63      1                       osd.63  up      1
64      1                       osd.64  up      1
65      1                       osd.65  up      1
66      1                       osd.66  up      1
67      1                       osd.67  up      1
68      1                       osd.68  up      1
69      1                       osd.69  up      1

Export and reload the CRUSH map

Export the current CRUSH map (compiled binary form)
ceph osd getcrushmap -o crushmap.dump
Decompile the CRUSH map (binary -> plain text)
crushtool -d crushmap.dump -o crushmap.txt
Recompile the CRUSH map (plain text -> binary)
crushtool -c crushmap.txt -o crushmap.done
Load the new CRUSH map into the cluster
ceph osd setcrushmap -i crushmap.done
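A quick sanity check before loading a modified map (not part of the original list) is to decompile the freshly compiled file and diff it against the edited text; only cosmetic differences such as whitespace or comments should appear:

crushtool -d crushmap.done -o crushmap.check.txt
diff crushmap.txt crushmap.check.txt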

Create the new CRUSH map (edited plain-text form)

# begin crush map
tunable choose_local_tries 0
tunable choose_local_fallback_tries 0
tunable choose_total_tries 50
tunable chooseleaf_descend_once 1
tunable chooseleaf_vary_r 1

# devices
device 0 osd.0
device 1 osd.1
device 2 osd.2
device 3 osd.3
device 4 osd.4
device 5 osd.5
device 6 osd.6
device 7 osd.7
device 8 osd.8
device 9 osd.9
device 10 osd.10
device 11 osd.11
device 12 osd.12
device 13 osd.13
device 14 osd.14
device 15 osd.15
device 16 osd.16
device 17 osd.17
device 18 osd.18
device 19 osd.19
device 20 osd.20
device 21 osd.21
device 22 osd.22
device 23 osd.23
device 24 osd.24
device 25 osd.25
device 26 osd.26
device 27 osd.27
device 28 osd.28
device 29 osd.29
device 30 osd.30
device 31 osd.31
device 32 osd.32
device 33 osd.33
device 34 osd.34
device 35 osd.35
device 36 osd.36
device 37 osd.37
device 38 osd.38
device 39 osd.39
device 40 osd.40
device 41 osd.41
device 42 osd.42
device 43 osd.43
device 44 osd.44
device 45 osd.45
device 46 osd.46
device 47 osd.47
device 48 osd.48
device 49 osd.49
device 50 osd.50
device 51 osd.51
device 52 osd.52
device 53 osd.53
device 54 osd.54
device 55 osd.55
device 56 osd.56
device 57 osd.57
device 58 osd.58
device 59 osd.59
device 60 osd.60
device 61 osd.61
device 62 osd.62
device 63 osd.63
device 64 osd.64
device 65 osd.65
device 66 osd.66
device 67 osd.67
device 68 osd.68
device 69 osd.69

# types
type 0 osd
type 1 host
type 2 chassis
type 3 rack
type 4 row
type 5 pdu
type 6 pod
type 7 room
type 8 datacenter
type 9 region
type 10 root

# buckets
host hh-yun-ceph-cinder015-128055 {
        id -2           # do not change unnecessarily
        # weight 10.000
        alg straw
        hash 0  # rjenkins1
        item osd.0 weight 1.000
        item osd.1 weight 1.000
        item osd.2 weight 1.000
        item osd.3 weight 1.000
        item osd.4 weight 1.000
        item osd.5 weight 1.000
        item osd.6 weight 1.000
        item osd.7 weight 1.000
        item osd.8 weight 1.000
        item osd.9 weight 1.000
}
host hh-yun-ceph-cinder016-128056 {
        id -3           # do not change unnecessarily
        # weight 10.000
        alg straw
        hash 0  # rjenkins1
        item osd.10 weight 1.000
        item osd.11 weight 1.000
        item osd.12 weight 1.000
        item osd.13 weight 1.000
        item osd.14 weight 1.000
        item osd.15 weight 1.000
        item osd.16 weight 1.000
        item osd.17 weight 1.000
        item osd.18 weight 1.000
        item osd.19 weight 1.000
}
host hh-yun-ceph-cinder017-128057 {
        id -4           # do not change unnecessarily
        # weight 10.000
        alg straw
        hash 0  # rjenkins1
        item osd.20 weight 1.000
        item osd.21 weight 1.000
        item osd.22 weight 1.000
        item osd.23 weight 1.000
        item osd.24 weight 1.000
        item osd.25 weight 1.000
        item osd.26 weight 1.000
        item osd.27 weight 1.000
        item osd.28 weight 1.000
        item osd.29 weight 1.000
}
host hh-yun-ceph-cinder023-128073 {
        id -5           # do not change unnecessarily
        # weight 10.000
        alg straw
        hash 0  # rjenkins1
        item osd.30 weight 1.000
        item osd.31 weight 1.000
        item osd.32 weight 1.000
        item osd.33 weight 1.000
        item osd.34 weight 1.000
        item osd.35 weight 1.000
        item osd.36 weight 1.000
        item osd.37 weight 1.000
        item osd.38 weight 1.000
        item osd.39 weight 1.000
}
host hh-yun-ceph-cinder024-128074 {
        id -6           # do not change unnecessarily
        # weight 10.000
        alg straw
        hash 0  # rjenkins1
        item osd.40 weight 1.000
        item osd.41 weight 1.000
        item osd.42 weight 1.000
        item osd.43 weight 1.000
        item osd.44 weight 1.000
        item osd.45 weight 1.000
        item osd.46 weight 1.000
        item osd.47 weight 1.000
        item osd.48 weight 1.000
        item osd.49 weight 1.000
}
host hh-yun-ceph-cinder025-128075 {
        id -7           # do not change unnecessarily
        # weight 10.000
        alg straw
        hash 0  # rjenkins1
        item osd.50 weight 1.000
        item osd.51 weight 1.000
        item osd.52 weight 1.000
        item osd.53 weight 1.000
        item osd.54 weight 1.000
        item osd.55 weight 1.000
        item osd.56 weight 1.000
        item osd.57 weight 1.000
        item osd.58 weight 1.000
        item osd.59 weight 1.000
}
host hh-yun-ceph-cinder026-128076 {
        id -8           # do not change unnecessarily
        # weight 10.000
        alg straw
        hash 0  # rjenkins1
        item osd.60 weight 1.000
        item osd.61 weight 1.000
        item osd.62 weight 1.000
        item osd.63 weight 1.000
        item osd.64 weight 1.000
        item osd.65 weight 1.000
        item osd.66 weight 1.000
        item osd.67 weight 1.000
        item osd.68 weight 1.000
        item osd.69 weight 1.000
}
root default {
        id -1           # do not change unnecessarily
        # weight 70.000
        alg straw
        hash 0  # rjenkins1
        item hh-yun-ceph-cinder015-128055 weight 10.000
        item hh-yun-ceph-cinder016-128056 weight 10.000
        item hh-yun-ceph-cinder017-128057 weight 10.000
        item hh-yun-ceph-cinder023-128073 weight 10.000
        item hh-yun-ceph-cinder024-128074 weight 10.000
        item hh-yun-ceph-cinder025-128075 weight 10.000
        item hh-yun-ceph-cinder026-128076 weight 10.000
}

# rules
rule default {
        ruleset 1
        type replicated
        min_size 2
        max_size 3
        step take default
        step chooseleaf firstn 0 type host
        step emit
}

# end crush map
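Before loading the new map it can be tested offline with crushtool; a sketch using the crushmap.done file from the compile step above, with --rule 1 and --num-rep 3 matching the ruleset and replica count used for the volumes pool:

crushtool -i crushmap.done --test --rule 1 --num-rep 3 --show-statistics
crushtool -i crushmap.done --test --rule 1 --num-rep 3 --show-bad-mappings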

Apply the new CRUSH map

[root@hh-yun-ceph-cinder015-128055 tmp]# crushtool -c crush.txt -o crushmap.new
[root@hh-yun-ceph-cinder015-128055 tmp]# ceph osd setcrushmap -i crushmap.new
set crush map
[root@hh-yun-ceph-cinder015-128055 tmp]# ceph osd pool set volumes crush_ruleset 1
set pool 4 crush_ruleset to 1
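Switching the pool to the new ruleset can trigger data movement as PGs are remapped; progress can be followed with the standard status commands (not shown in the original session):

ceph -s
ceph -w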

Check the current Ceph pool status

[root@hh-yun-ceph-cinder015-128055 tmp]# ceph osd dump | grep 'replicated size' | grep volumes
pool 4 'volumes' replicated size 3 min_size 2 crush_ruleset 1 object_hash rjenkins pg_num 4000 pgp_num 4000 last_change 248 flags hashpspool stripe_width 0
[root@hh-yun-ceph-cinder015-128055 tmp]# ceph health
HEALTH_OK