1. Environment Configuration
# Configure YUM on all nodes.
# Move the distribution's stock repo files out of the way first:
cd /etc/yum.repos.d/
mkdir /tmp/bak
mv * /tmp/bak/
# Configure the CentOS base repo and the EPEL repo:
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
yum install wget -y
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
# Install the YUM priorities plugin so the priority= settings below take effect:
yum -y install yum-plugin-priorities.noarch
# Configure the Ceph repo:
cat << EOF | tee /etc/yum.repos.d/ceph.repo
[Ceph]
name=Ceph packages for \$basearch
baseurl=http://mirrors.163.com/ceph/rpm-nautilus/el7/\$basearch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1
[Ceph-noarch]
name=Ceph noarch packages
baseurl=http://mirrors.163.com/ceph/rpm-nautilus/el7/noarch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1
[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.163.com/ceph/rpm-nautilus/el7/SRPMS
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
EOF
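After writing the repo files, refreshing the yum metadata cache ensures that later installs see the new mirrors (an optional check, not in the original steps):
# Refresh the metadata cache and list the active repos:
yum clean all
yum makecache
yum repolist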
# Stop and disable the firewall:
systemctl stop firewalld
systemctl disable firewalld
systemctl status firewalld
# Set the hostname on each node.
On ceph1:
hostnamectl --static set-hostname ceph1
On ceph2:
hostnamectl --static set-hostname ceph2
On ceph3:
hostnamectl --static set-hostname ceph3
# Configure /etc/hosts on all nodes with the following content:
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.0.231 ceph1
192.168.0.232 ceph2
192.168.0.233 ceph3
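A quick sanity check that host-name resolution and connectivity work (run from any node):
# Each name should resolve and answer one ping:
for i in 1 2 3; do ping -c 1 ceph$i; done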
# NTP configuration on all nodes.
Install NTP on all cluster and client nodes, then adjust the configuration.
yum -y install ntp ntpdate
Use ceph1 as the NTP server; on ceph1, edit the NTP configuration file.
vi /etc/ntp.conf
Add the following so ceph1 serves time to the cluster:
restrict 127.0.0.1
restrict ::1
restrict 192.168.0.0 mask 255.255.255.0 # ceph1's subnet and netmask
server 127.127.1.0
fudge 127.127.1.0 stratum 8
On ceph2, ceph3, and all client nodes, edit the NTP configuration file.
vi /etc/ntp.conf
Add the following so each node syncs from ceph1:
server 192.168.0.231
systemctl start ntpd
systemctl enable ntpd
systemctl status ntpd
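Once ntpd has been running for a few minutes, ntpq -p on ceph2/ceph3 should list ceph1 as a peer; an asterisk in the first column marks the selected time source.
# Check the peer list on the clients:
ntpq -p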
# SSH setup: generate a key pair on ceph1 and push the public key to every host/client node:
ssh-keygen -t rsa # press Enter to accept the defaults
for i in {1..3}; do ssh-copy-id ceph$i; done # type yes and the node's password when prompted
for i in {1..3}; do ssh-copy-id client$i; done # repeat for client nodes, if present
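Before ceph-deploy relies on it, confirm that passwordless SSH works to every node:
# Each command should print the remote hostname without asking for a password:
for i in {1..3}; do ssh ceph$i hostname; done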
# Disable SELinux on all nodes:
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
setenforce 0
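setenforce 0 changes the mode immediately, while the config edit makes it permanent across reboots. You can confirm the current mode with:
getenforce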
2. Install the Ceph Software
By default, yum install pulls the latest version available in the configured repos; if you do not want the latest version, you can restrict versions in /etc/yum.conf.
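As a sketch of the idea (the version patterns below are illustrative, not from the original text): either request an explicit version at install time, or add an exclude line under the [main] section of /etc/yum.conf.
# Option 1: install an explicit version (assumes 14.2.9 is present in the repo)
yum -y install ceph-14.2.9
# Option 2: pin by exclusion under [main] in /etc/yum.conf, e.g.:
# exclude=ceph-15.* ceph-common-15.*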
2.1 Install Ceph on all cluster and client nodes
yum -y install ceph
Check the installed version with ceph -v:
[root@ceph1 ~]# ceph -v
ceph version 14.2.9 (581f22da52345dba46ee232b73b990f06029a2a0) nautilus (stable)
[root@ceph2 ~]# ceph -v
ceph version 14.2.9 (581f22da52345dba46ee232b73b990f06029a2a0) nautilus (stable)
[root@ceph3 ~]# ceph -v
ceph version 14.2.9 (581f22da52345dba46ee232b73b990f06029a2a0) nautilus (stable)
2.2 Additionally, install ceph-deploy on the ceph1 node.
yum -y install ceph-deploy
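You can confirm the deploy tool is installed and on the PATH by printing its version; it should match the 2.0.1 seen in the transcript below.
ceph-deploy --version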
3. Deploy the MON Nodes
3.1 Create a working directory and generate the configuration file
mkdir cluster
cd cluster
ceph-deploy new ceph1 ceph2 ceph3
[root@ceph1 ~]# cd cluster/
[root@ceph1 cluster]# ceph-deploy new ceph1 ceph2 ceph3
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy new ceph1 ceph2 ceph3
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ] username : None
[ceph_deploy.cli][INFO ] func : <function new at 0x7ffb7dc07de8>
[ceph_deploy.cli][INFO ] verbose : False
[ceph_deploy.cli][INFO ] overwrite_conf : False
[ceph_deploy.cli][INFO ] quiet : False
[ceph_deploy.cli][INFO ] cd_conf : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7ffb7d58c6c8>
[ceph_deploy.cli][INFO ] cluster : ceph
[ceph_deploy.cli][INFO ] ssh_copykey : True
[ceph_deploy.cli][INFO ] mon : ['ceph1', 'ceph2', 'ceph3']
[ceph_deploy.cli][INFO ] public_network : None
[ceph_deploy.cli][INFO ] ceph_conf : None
[ceph_deploy.cli][INFO ] cluster_network : None
[ceph_deploy.cli][INFO ] default_release : False
[ceph_deploy.cli][INFO ] fsid : None
[ceph_deploy.new][DEBUG ] Creating new cluster named ceph
[ceph_deploy.new][INFO ] making sure passwordless SSH succeeds
[ceph1][DEBUG ] connected to host: ceph1
[ceph1][DEBUG ] detect platform information from remote host
[ceph1][DEBUG ] detect machine type
[ceph1][DEBUG ] find the location of an executable
[ceph1][INFO ] Running command: /usr/sbin/ip link show
[ceph1][INFO ] Running command: /usr/sbin/ip addr show
[ceph1][DEBUG ] IP addresses found: [u'192.168.0.231']
[ceph_deploy.new][DEBUG ] Resolving host ceph1
[ceph_deploy.new][DEBUG ] Monitor ceph1 at 192.168.0.231
[ceph_deploy.new][INFO ] making sure passwordless SSH succeeds
[ceph2][DEBUG ] connected to host: ceph1
[ceph2][INFO ] Running command: ssh -CT -o BatchMode=yes ceph2
[ceph2][DEBUG ] connected to host: ceph2
[ceph2][DEBUG ] detect platform information from remote host
[ceph2][DEBUG ] detect machine type
[ceph2][DEBUG ] find the location of an executable
[ceph2][INFO ] Running command: /usr/sbin/ip link show
[ceph2][INFO ] Running command: /usr/sbin/ip addr show
[ceph2][DEBUG ] IP addresses found: [u'192.168.0.232']
[ceph_deploy.new][DEBUG ] Resolving host ceph2
[ceph_deploy.new][DEBUG ] Monitor ceph2 at 192.168.0.232
[ceph_deploy.new][INFO ] making sure passwordless SSH succeeds
[ceph3][DEBUG ] connected to host: ceph1
[ceph3][INFO ] Running command: ssh -CT -o BatchMode=yes ceph3
[ceph3][DEBUG ] connected to host: ceph3
[ceph3][DEBUG ] detect platform information from remote host
[ceph3][DEBUG ] detect machine type
[ceph3][DEBUG ] find the location of an executable
[ceph3][INFO ] Running command: /usr/sbin/ip link show
[ceph3][INFO ] Running command: /usr/sbin/ip addr show
[ceph3][DEBUG ] IP addresses found: [u'192.168.0.233']
[ceph_deploy.new][DEBUG ] Resolving host ceph3
[ceph_deploy.new][DEBUG ] Monitor ceph3 at 192.168.0.233
[ceph_deploy.new][DEBUG ] Monitor initial members are ['ceph1', 'ceph2', 'ceph3']
[ceph_deploy.new][DEBUG ] Monitor addrs are ['192.168.0.231', '192.168.0.232', '192.168.0.233']
[ceph_deploy.new][DEBUG ] Creating a random mon key...
[ceph_deploy.new][DEBUG ] Writing monitor keyring to ceph.mon.keyring...
[ceph_deploy.new][DEBUG ] Writing initial config to ceph.conf...
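One optional tweak before continuing (not performed in this run): if the nodes have more than one network interface, explicitly declaring the public network in the generated ceph.conf avoids ambiguity. The subnet below matches this cluster's addresses; run this inside the cluster/ working directory.
# A minimal sketch: append the public network to the generated ceph.conf
cat >> ceph.conf << 'EOF'
public_network = 192.168.0.0/24
EOF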
3.2 Initialize the keys
ceph-deploy mon create-initial
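On success, ceph-deploy bootstraps the monitors and gathers the keyrings into the working directory; a quick check (the file names shown are the usual ones for this release):
ls -l ceph.conf ceph.mon.keyring ceph.client.admin.keyring ceph.bootstrap-*.keyring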
3.3 Copy ceph.client.admin.keyring to every node
ceph-deploy --overwrite-conf admin ceph1 ceph2 ceph3
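To verify the keyring reached every node, a quick sketch using the passwordless SSH configured earlier:
for i in {1..3}; do ssh ceph$i ls -l /etc/ceph/ceph.client.admin.keyring; done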
3.4 Check whether the configuration succeeded.
[root@ceph1 cluster]# ceph -s
  cluster:
    id:     ea192428-05d2-437a-8cce-9d187de82dd5
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 5m)
    mgr: no daemons active
    osd: 0 osds: 0 up, 0 in

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:
4. Deploy the MGR Nodes
ceph-deploy mgr create ceph1 ceph2 ceph3
Check whether the MGRs deployed successfully.
ceph -s
[root@ceph1 cluster]# ceph -s
  cluster:
    id:     ea192428-05d2-437a-8cce-9d187de82dd5
    health: HEALTH_WARN
            OSD count 0 < osd_pool_default_size 3

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 8m)
    mgr: ceph1(active, since 22s), standbys: ceph2, ceph3
    osd: 0 osds: 0 up, 0 in

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:
5. Deploy the OSD Nodes
Create one OSD per data disk (sdb, sdc, sdd) on each node:
ceph-deploy osd create --data /dev/sdb ceph1
ceph-deploy osd create --data /dev/sdc ceph1
ceph-deploy osd create --data /dev/sdd ceph1
ceph-deploy osd create --data /dev/sdb ceph2
ceph-deploy osd create --data /dev/sdc ceph2
ceph-deploy osd create --data /dev/sdd ceph2
ceph-deploy osd create --data /dev/sdb ceph3
ceph-deploy osd create --data /dev/sdc ceph3
ceph-deploy osd create --data /dev/sdd ceph3
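In addition to ceph -s below, ceph osd tree shows how the nine OSDs map onto the three hosts:
ceph osd tree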
After the OSDs are created, check that the cluster is healthy:
[root@ceph1 cluster]# ceph -s
  cluster:
    id:     ea192428-05d2-437a-8cce-9d187de82dd5
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 14m)
    mgr: ceph1(active, since 6m), standbys: ceph2, ceph3
    osd: 9 osds: 9 up (since 2m), 9 in (since 2m)

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   9.0 GiB used, 135 GiB / 144 GiB avail
    pgs:
6. Validate Ceph
Create a storage pool (pg_num and pgp_num both set to 10):
ceph osd pool create vdbench 10 10
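On Nautilus, a pool that holds data without an associated application triggers a health warning. Since this pool will back RBD images, it is worth tagging it accordingly (a standard step, though not shown in the original run):
ceph osd pool application enable vdbench rbd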
Create a block device: a 200 MiB, format-2 RBD image with only the layering feature enabled.
rbd create image01 --size 200 --pool vdbench --image-format 2 --image-feature layering
rbd ls --pool vdbench
[root@ceph1 cluster]# rbd create image01 --size 200 --pool vdbench --image-format 2 --image-feature layering
[root@ceph1 cluster]# rbd ls --pool vdbench
image01
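To exercise the image end to end, you can map it through the kernel RBD client and put a filesystem on it. A sketch, assuming the rbd kernel module is available and that the mapped device comes back as /dev/rbd0 (use whatever path rbd map actually prints):
rbd map vdbench/image01 # prints the mapped device, e.g. /dev/rbd0
mkfs.xfs /dev/rbd0
mount /dev/rbd0 /mnt
echo hello > /mnt/test.txt && cat /mnt/test.txt
umount /mnt
rbd unmap /dev/rbd0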