I. Heartbeat Network Architecture
[Figure: Heartbeat network architecture diagram]
II. Preparation
1. Operating system
CentOS 6.4 x86_64, minimal install
Heartbeat v3, installed with yum (compiling heartbeat from source kept failing, so yum it is).
A note on the command-prompt conventions used throughout this post:
#                 run the command on both node1 and node2
[root@node1 ~]#   run the command on node1 only
[root@node2 ~]#   run the command on node2 only
[root@node3 ~]#   run the command on node3 only
One unresolved issue: I originally meant to switch Heartbeat's resource manager from haresources to crm in this post, but the crm configuration never came up cleanly. If you have a working crm example, please share it. Thanks!
2. IP address plan
Host   IP             Netmask        Gateway      FQDN            NIC   Role
node1  192.168.1.196  255.255.255.0  192.168.0.1  node1.test.com  eth1  Active
node2  192.168.1.197  255.255.255.0  192.168.0.1  node2.test.com  eth1  Passive
node3  192.168.1.198  255.255.255.0  192.168.0.1  node3.test.com  eth1  NFS server
vip    192.168.1.223  255.255.255.0
3. Hostname resolution
[root@node1 ~]# uname -n
node1.test.com
[root@node1 ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.196 node1.test.com node1
192.168.1.197 node2.test.com node2
192.168.1.198 node3.test.com node3
[root@node2 ~]# uname -n
node2.test.com
[root@node2 ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.196 node1.test.com node1
192.168.1.197 node2.test.com node2
192.168.1.198 node3.test.com node3
4. Mutual SSH trust between the two nodes
[root@node1 ~]# ssh-keygen -t rsa -f ~/.ssh/id_rsa -P ''
Generating public/private rsa key pair.
Created directory '/root/.ssh'.
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
ce:f3:d7:63:10:9b:d2:86:f8:8a:5a:ee:41:d8:d2:01 root@node1.test.com
The key's randomart image is:
+--[ RSA 2048]----+
| E |
| . |
| . |
| + . . |
| o + S. o + |
| o o. o * |
| o +. o o |
| o o o. . + |
| .o+ .... . . |
+-----------------+
[root@node1 ~]# ssh-copy-id -i .ssh/id_rsa.pub root@node2.test.com
The authenticity of host 'node2.test.com (192.168.0.102)' can't be established.
RSA key fingerprint is 46:b9:7c:11:db:75:93:ad:f1:26:f0:a7:4d:00:40:20.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node2.test.com,192.168.0.102' (RSA) to the list of known hosts.
root@node2.test.com's password:
Now try logging into the machine, with "ssh 'root@node2.test.com'", and check in:
  .ssh/authorized_keys
to make sure we haven't added extra keys that you weren't expecting.
[root@node2 ~]# ssh-keygen -t rsa -f ~/.ssh/id_rsa -P ''
Generating public/private rsa key pair.
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
c4:e3:71:f8:82:09:f0:42:9c:e7:20:db:db:ce:dc:0b root@node2.test.com
The key's randomart image is:
+--[ RSA 2048]----+
| .o. |
|..+o. . . |
| +.+o * . |
|. .... = = |
| o o S . |
| . . . |
| +E. |
| +.. |
| .. |
+-----------------+
[root@node2 ~]# ssh-copy-id -i .ssh/id_rsa.pub root@node1.test.com
The authenticity of host 'node1.test.com (192.168.0.101)' can't be established.
RSA key fingerprint is 46:b9:7c:11:db:75:93:ad:f1:26:f0:a7:4d:00:40:20.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node1.test.com,192.168.0.101' (RSA) to the list of known hosts.
root@node1.test.com's password:
Now try logging into the machine, with "ssh 'root@node1.test.com'", and check in:
  .ssh/authorized_keys
to make sure we haven't added extra keys that you weren't expecting.
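Before moving on, it's worth confirming the trust works in both directions; a quick check like the following (my addition, not part of the original run) should print the peer's hostname without ever prompting for a password:

[root@node1 ~]# ssh node2.test.com 'uname -n'   # should print node2.test.com, no password prompt
[root@node2 ~]# ssh node1.test.com 'uname -n'   # should print node1.test.com, no password prompt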
5. Time synchronization
# yum -y install ntpdate
# ntpdate asia.pool.ntp.org
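A one-shot ntpdate will drift again over time. One common approach (my suggestion; the original post only syncs once) is a periodic cron entry on every node:

# echo '*/30 * * * * /usr/sbin/ntpdate asia.pool.ntp.org >/dev/null 2>&1' >> /var/spool/cron/root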
6. Turn off the firewall
# getenforce
Disabled
# /etc/init.d/iptables status
iptables: Firewall is not running.
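Both were already off here. If SELinux or iptables is still active on your machines, a sketch like this turns them off on CentOS 6 (adjust to your own security policy before copying it):

# setenforce 0                                                   # SELinux permissive/off until reboot
# sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config   # persist across reboots
# service iptables stop && chkconfig iptables off                # stop iptables now and at boot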
III. Install the Heartbeat Packages
1. Install the EPEL repository
# wget http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm
# wget http://rpms.famillecollet.com/enterprise/remi-release-6.rpm
# rpm -Uvh remi-release-6*.rpm epel-release-6*.rpm
2. Edit the EPEL repo configuration
# sed -i 's/#baseurl/baseurl/g' /etc/yum.repos.d/epel.repo
# sed -i 's/mirrorlist/#mirrorlist/' /etc/yum.repos.d/epel.repo
3. Install the heartbeat packages
# yum install heartbeat heartbeat-libs
4. View heartbeat's package dependencies
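The dependency listing for this step is missing from the extracted post. If you want to inspect the dependencies yourself, commands like these work (my addition, not from the original):

# yum deplist heartbeat | less   # resolve dependencies from the repo metadata
# rpm -qR heartbeat              # list requirements of the installed package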
IV. Configure the Heartbeat Service
1. Heartbeat's configuration files
Heartbeat uses three configuration files:
authkeys      # the key nodes use to authenticate each other; must be mode 600
ha.cf         # Heartbeat's core service configuration file
haresources   # cluster resource definitions (resource manager: haresources | crm)
2. Copy the default configuration files
[root@node1 ~]# cp /usr/share/doc/heartbeat-3.0.4/{ha.cf,authkeys,haresources} /etc/ha.d/
3. Edit the authkeys file
[root@node1 ~]# dd if=/dev/random bs=512 count=1 | openssl md5   # generate a random key
0+1 records in
0+1 records out
72 bytes (72 B) copied, 4.8467e-05 s, 1.5 MB/s
(stdin)= acf7401e6b20d4cec482ba1160eb8efe
[root@node1 ~]# vim /etc/ha.d/authkeys
# append the following two lines at the end of the file:
auth 1
1 md5 acf7401e6b20d4cec482ba1160eb8efe
[root@node1 ~]# chmod 600 /etc/ha.d/authkeys
4. Edit ha.cf, the main configuration file
[root@node1 ha.d]# grep -v '^#' ha.cf | sed '/^$/d'
# Note: only two kinds of changes are needed -- the mcast transport line and the node list; everything else can stay at its defaults.
logfacility local0
mcast eth1 225.100.100.100 694 1 0   # carry heartbeats as multicast on eth1
auto_failback on
node node1.test.com                  # declare every node in the cluster
node node2.test.com
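For reference, a fuller ha.cf with the timing directives spelled out might look like the sketch below. The timing values are the stock examples from the shipped sample ha.cf, not values tuned for this setup:

logfile /var/log/ha-log              # where Heartbeat writes its log
logfacility local0
keepalive 2                          # seconds between heartbeat packets
deadtime 30                          # seconds of silence before declaring a node dead
warntime 10                          # warn about late heartbeats after this long
initdead 120                         # extra grace period at initial cluster start-up
udpport 694
mcast eth1 225.100.100.100 694 1 0   # multicast heartbeat on eth1
auto_failback on                     # resources move back when the preferred node returns
node node1.test.com
node node2.test.com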
5. Edit the haresources configuration file
[root@node1 ha.d]# grep -v '^#' /etc/ha.d/haresources
node1.test.com IPaddr::192.168.1.223 Filesystem::192.168.1.198:/mydata::/mydata::nfs mysqld
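Reading that resource line left to right (my annotation of the standard haresources syntax; resources start left to right on takeover and stop in reverse order on release):

node1.test.com                                    # preferred owner of this resource group
IPaddr::192.168.1.223                             # the VIP, brought up by the IPaddr resource agent
Filesystem::192.168.1.198:/mydata::/mydata::nfs   # device::mountpoint::fstype -- the node3 NFS export
mysqld                                            # the init script started last, stopped first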
6. Copy the configuration files to node2
[root@node1 ~]# scp /etc/ha.d/{ha.cf,haresources,authkeys} root@node2.test.com:/etc/ha.d/
V. Create the LVM Logical Volume
1. Add a new 10 GB disk to node3
2. Partition the new disk on node3 and tag the partition as an LVM volume
[root@node3 ~]# fdisk /dev/sdb
Device contains neither a valid DOS partition table, nor Sun, SGI or OSF disklabel
Building a new DOS disklabel with disk identifier 0xc42dce64.
Changes will remain in memory only, until you decide to write them.
After that, of course, the previous content won't be recoverable.
Warning: invalid flag 0x0000 of partition table 4 will be corrected by w(rite)
WARNING: DOS-compatible mode is deprecated. It's strongly recommended to
switch off the mode (command 'c') and change display units to
sectors (command 'u').
Command (m for help): n
Command action
   e   extended
   p   primary partition (1-4)
p
Partition number (1-4): 1
First cylinder (1-1305, default 1): 1
Last cylinder, +cylinders or +size{K,M,G} (1-1305, default 1305):
Using default value 1305
Command (m for help): p
Disk /dev/sdb: 10.7 GB, 10737418240 bytes
255 heads, 63 sectors/track, 1305 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0xc42dce64
   Device Boot      Start         End      Blocks   Id  System
/dev/sdb1               1        1305    10482381   83  Linux
Command (m for help): t
Selected partition 1
Hex code (type L to list codes): 8e
Changed system type of partition 1 to 8e (Linux LVM)
Command (m for help): p
Disk /dev/sdb: 10.7 GB, 10737418240 bytes
255 heads, 63 sectors/track, 1305 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0xc42dce64
   Device Boot      Start         End      Blocks   Id  System
/dev/sdb1               1        1305    10482381   8e  Linux LVM
Command (m for help): w
The partition table has been altered!
Calling ioctl() to re-read partition table.
Syncing disks.
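If the kernel keeps showing the old partition table after the write (it can happen when the disk is in use), it can usually be re-read without a reboot. This is a standard trick, not something node3 needed here, and partprobe assumes the parted package is installed:

[root@node3 ~]# partprobe /dev/sdb         # ask the kernel to re-read /dev/sdb's partition table
[root@node3 ~]# grep sdb /proc/partitions  # confirm sdb1 is now visible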
3. Create the LVM logical volume
[root@node3 ~]# yum -y install lvm2
[root@node3 ~]# pvcreate /dev/sdb1
  Physical volume "/dev/sdb1" successfully created
[root@node3 ~]# vgcreate myvg /dev/sdb1
  Volume group "myvg" successfully created
[root@node3 ~]# lvcreate -L 9G -n mydata myvg
  Logical volume "mydata" created
[root@node3 ~]# lvs | grep mydata
  mydata myvg -wi-a----- 9.00g
4. Format the LVM logical volume
[root@node3 ~]# mkfs.ext4 /dev/myvg/mydata
mke2fs 1.41.12 (17-May-2010)
Filesystem label=
OS type: Linux
Block size=4096 (log=2)
Fragment size=4096 (log=2)
Stride=0 blocks, Stripe width=0 blocks
589824 inodes, 2359296 blocks
117964 blocks (5.00%) reserved for the super user
First data block=0
Maximum filesystem blocks=2415919104
72 block groups
32768 blocks per group, 32768 fragments per group
8192 inodes per group
Superblock backups stored on blocks:
        32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632
Writing inode tables: done
Creating journal (32768 blocks): done
Writing superblocks and filesystem accounting information: done
This filesystem will be automatically checked every 23 mounts or
180 days, whichever comes first.  Use tune2fs -c or -i to override.
VI. Install and Configure NFS
1. Install the NFS packages
[root@node3 ~]# yum -y install nfs-utils rpcbind
2. Configure the NFS shared storage directory
[root@node3 ~]# mkdir /mydata
[root@node3 ~]# echo "/mydata 192.168.1.0/24(rw,all_squash,anonuid=3306,anongid=3306)" > /etc/exports
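Those export options are what make the MySQL steps below work: all_squash plus anonuid=3306/anongid=3306 maps every client user to UID/GID 3306, exactly the mysql user created later, so files written over NFS come out owned by mysql. If /etc/exports is edited while nfs is already running, it can also be re-read in place (standard exportfs usage, not shown in the original):

[root@node3 ~]# exportfs -ra   # re-export everything in /etc/exports
[root@node3 ~]# exportfs -v    # show the active exports and their options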
3. Check the NFS export list
[root@node3 ~]# showmount -e 192.168.1.198
clnt_create: RPC: Port mapper failure - Unable to receive: errno 111 (Connection refused)
[root@node3 ~]# service nfs start
Starting NFS services:                          [  OK  ]
Starting NFS mountd:                            [FAILED]
Starting NFS daemon: rpc.nfsd: writing fd to kernel failed: errno 111 (Connection refused)
rpc.nfsd: unable to set any sockets for nfsd    [FAILED]
[root@node3 ~]# service rpcbind start
Starting rpcbind:                               [  OK  ]
[root@node3 ~]# service nfs start
Starting NFS services:                          [  OK  ]
Starting NFS mountd:                            [  OK  ]
Starting NFS daemon:                            [  OK  ]
Starting RPC idmapd:                            [  OK  ]
[root@node3 ~]# showmount -e 192.168.1.198
Export list for 192.168.1.198:
/mydata 192.168.1.0/24
The first attempts fail because the NFS daemons register with rpcbind; start rpcbind first, and nfs comes up cleanly.
4. Mount the LVM logical volume locally
[root@node3 ~]# mount /dev/myvg/mydata /mydata/
[root@node3 ~]# ll /mydata/
total 16
drwx------ 2 root root 16384 Dec 31 09:02 lost+found
If everything above checks out, the next step is to prepare for installing and configuring MySQL.
VII. Install and Configure MySQL
1. Create the mysql user on node3
[root@node3 ~]# groupadd -g 3306 mysql
[root@node3 ~]# useradd -g mysql -u 3306 -s /sbin/nologin -M mysql
[root@node3 ~]# id mysql
uid=3306(mysql) gid=3306(mysql) groups=3306(mysql)
2. Give the mounted logical volume to the mysql user and group
[root@node3 ~]# chown -R mysql.mysql /mydata/
[root@node3 ~]# ll -d /mydata/
drwxr-xr-x 3 mysql mysql 4096 Dec 31 09:02 /mydata/
That wraps up the NFS side. Next, install and configure the MySQL service on node1.
node1:
1. Create the user
[root@node1 ~]# groupadd -g 3306 mysql
[root@node1 ~]# useradd -g mysql -u 3306 -s /sbin/nologin -M mysql
[root@node1 ~]# id mysql
uid=3306(mysql) gid=3306(mysql) groups=3306(mysql)
2. Unpack the MySQL tarball
[root@node1 ~]# tar xf mysql-5.6.12-linux-glibc2.5-x86_64.tar.gz -C /usr/local/
[root@node1 ~]# ln -sv /usr/local/mysql-5.6.12-linux-glibc2.5-x86_64 /usr/local/mysql
"/usr/local/mysql" -> "/usr/local/mysql-5.6.12-linux-glibc2.5-x86_64"
3. Set ownership on the MySQL installation
[root@node1 ~]# chown -R root.mysql /usr/local/mysql/
[root@node1 ~]# ll /usr/local/mysql/
total 76
drwxr-xr-x  2 root mysql  4096 Dec 31 09:15 bin
-rw-r--r--  1 root mysql 17987 Jun 20  2013 COPYING
drwxr-xr-x  3 root mysql  4096 Dec 31 09:16 data
drwxr-xr-x  2 root mysql  4096 Dec 31 09:16 docs
drwxr-xr-x  3 root mysql  4096 Dec 31 09:16 include
-rw-r--r--  1 root mysql  7469 Jun 20  2013 INSTALL-BINARY
drwxr-xr-x  3 root mysql  4096 Dec 31 09:16 lib
drwxr-xr-x  4 root mysql  4096 Dec 31 09:15 man
drwxr-xr-x 10 root mysql  4096 Dec 31 09:16 mysql-test
-rw-r--r--  1 root mysql  2496 Jun 20  2013 README
drwxr-xr-x  2 root mysql  4096 Dec 31 09:16 scripts
drwxr-xr-x 28 root mysql  4096 Dec 31 09:15 share
drwxr-xr-x  4 root mysql  4096 Dec 31 09:16 sql-bench
drwxr-xr-x  3 root mysql  4096 Dec 31 09:16 support-files
4. Mount the NFS share and create the MySQL data directory
[root@node1 ~]# mkdir /mydata
[root@node1 ~]# mount -t nfs4 192.168.1.198:/mydata /mydata
[root@node1 ~]# ll /mydata/
total 16
drwx------ 2 mysql mysql 16384 Dec 31 09:02 lost+found
[root@node1 ~]# mkdir /mydata/data
[root@node1 ~]# ll /mydata/
total 20
drwxr-xr-x 2 mysql mysql  4096 Dec 31 09:24 data
drwx------ 2 mysql mysql 16384 Dec 31 09:02 lost+found
5. Initialize the database
[root@node1 ~]# yum -y install libaio
[root@node1 ~]# /usr/local/mysql/scripts/mysql_install_db --user=mysql --datadir=/mydata/data/ --basedir=/usr/local/mysql/
6. Copy the configuration file and the init script
[root@node1 ~]# cp /usr/local/mysql/support-files/my-default.cnf /etc/my.cnf
cp: overwrite "/etc/my.cnf"? yes
[root@node1 ~]# cp /usr/local/mysql/support-files/mysql.server /etc/init.d/mysqld
7. Edit the MySQL configuration file
[root@node1 ~]# vim /etc/my.cnf
# under the [mysqld] section, set:
datadir = /mydata/data
innodb_file_per_table = 1
8. Start the service and test a login
[root@node1 ~]# service mysqld start
Starting MySQL.. SUCCESS!
[root@node1 ~]# /usr/local/mysql/bin/mysql -e "show databases;"
+--------------------+
| Database           |
+--------------------+
| information_schema |
| mysql              |
| performance_schema |
| test               |
+--------------------+
9. Copy the MySQL configuration file and init script to node2
[root@node1 ~]# scp /etc/my.cnf node2:/etc/my.cnf
[root@node1 ~]# scp /etc/init.d/mysqld node2:/etc/init.d/mysqld
10. Stop the MySQL service
[root@node1 ~]# service mysqld stop
Shutting down MySQL.. SUCCESS!
node2:
1. Create the user
[root@node2 ~]# groupadd -g 3306 mysql
[root@node2 ~]# useradd -g mysql -u 3306 -s /sbin/nologin -M mysql
[root@node2 ~]# id mysql
uid=3306(mysql) gid=3306(mysql) groups=3306(mysql)
2. Unpack the MySQL tarball
[root@node2 ~]# tar xf mysql-5.6.12-linux-glibc2.5-x86_64.tar.gz -C /usr/local/
[root@node2 ~]# ln -sv /usr/local/mysql-5.6.12-linux-glibc2.5-x86_64 /usr/local/mysql
"/usr/local/mysql" -> "/usr/local/mysql-5.6.12-linux-glibc2.5-x86_64"
3. Set ownership on the MySQL installation
[root@node2 ~]# chown -R root.mysql /usr/local/mysql/
[root@node2 ~]# ll /usr/local/mysql/
total 76
drwxr-xr-x  2 root mysql  4096 Dec 31 09:15 bin
-rw-r--r--  1 root mysql 17987 Jun 20  2013 COPYING
drwxr-xr-x  3 root mysql  4096 Dec 31 09:16 data
drwxr-xr-x  2 root mysql  4096 Dec 31 09:16 docs
drwxr-xr-x  3 root mysql  4096 Dec 31 09:16 include
-rw-r--r--  1 root mysql  7469 Jun 20  2013 INSTALL-BINARY
drwxr-xr-x  3 root mysql  4096 Dec 31 09:16 lib
drwxr-xr-x  4 root mysql  4096 Dec 31 09:15 man
drwxr-xr-x 10 root mysql  4096 Dec 31 09:16 mysql-test
-rw-r--r--  1 root mysql  2496 Jun 20  2013 README
drwxr-xr-x  2 root mysql  4096 Dec 31 09:16 scripts
drwxr-xr-x 28 root mysql  4096 Dec 31 09:15 share
drwxr-xr-x  4 root mysql  4096 Dec 31 09:16 sql-bench
drwxr-xr-x  3 root mysql  4096 Dec 31 09:16 support-files
4. Mount the NFS share
[root@node2 ~]# mkdir /mydata
[root@node2 ~]# mount -t nfs4 192.168.1.198:/mydata /mydata
[root@node2 ~]# ll /mydata/
total 20
drwxr-xr-x 5 mysql mysql  4096 Dec 31 09:28 data
drwx------ 2 mysql mysql 16384 Dec 31 09:02 lost+found
5. Start the MySQL service
[root@node2 ~]# service mysqld start
Starting MySQL. ERROR! The server quit without updating PID file (/mydata/data/node2.test.com.pid).
6. Check the error log
[root@node2 data]# tail -f node2.test.com.err
141231 09:39:10 mysqld_safe Starting mysqld daemon with databases from /mydata/data
/usr/local/mysql/bin/mysqld: error while loading shared libraries: libaio.so.1: cannot open shared object file: No such file or directory
141231 09:39:10 mysqld_safe mysqld from pid file /mydata/data/node2.test.com.pid ended
Ah, the libaio package was never installed:
[root@node2 ~]# yum -y install libaio
Start the MySQL service again:
[root@node2 ~]# service mysqld start
Starting MySQL.. SUCCESS!
VIII. Start the Heartbeat Service
1. Start the heartbeat service on node1 and node2
[root@node1 ~]# service heartbeat start
Starting High-Availability services: INFO:  Resource is stopped
Done.
[root@node1 ~]# ssh node2 "service heartbeat start"
Starting High-Availability services: 2014/12/31_09:50:20 INFO:  Resource is stopped
Done.
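One thing worth adding (the post starts heartbeat by hand): for the cluster to survive a reboot, heartbeat itself should be enabled at boot on both nodes:

# chkconfig heartbeat on
# chkconfig --list heartbeat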
2. Check the VIP, the NFS mount, and the MySQL service
[root@node1 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:c7:14:97 brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.196/24 brd 255.255.255.255 scope global eth1
    inet 192.168.1.223/24 brd 255.255.255.255 scope global secondary eth1
    inet6 fe80::20c:29ff:fec7:1497/64 scope link
       valid_lft forever preferred_lft forever
[root@node1 ~]# df -h
Filesystem                    Size  Used Avail Use% Mounted on
/dev/mapper/VolGroup-lv_root   16G  2.7G   12G  19% /
tmpfs                         495M     0  495M   0% /dev/shm
/dev/sda1                     485M   32M  428M   7% /boot
192.168.1.198:/mydata         8.9G  259M  8.2G   4% /mydata
[root@node1 ~]# service mysqld status
SUCCESS! MySQL running (3650)
3. Grant remote access privileges
Since this is a MySQL high-availability setup, clients will be logging in to MySQL remotely, so first grant remote login privileges.
[root@node1 ~]# /usr/local/mysql/bin/mysql
Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 1
Server version: 5.6.12 MySQL Community Server (GPL)
Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
mysql> grant all privileges on *.* to root@'%';
Query OK, 0 rows affected (0.05 sec)
mysql> flush privileges;
Query OK, 0 rows affected (0.03 sec)
mysql> \q
Bye
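Note that this grant creates a passwordless root account reachable from anywhere, which is fine for a lab but nothing more. In any real deployment, give the account a password; the password below is a placeholder of mine, not from the post:

mysql> grant all privileges on *.* to root@'%' identified by 'S3cretPassw0rd';
mysql> flush privileges;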
IX. Test MySQL Failover
1. Prepare for the test
Note: on both node1 and node2, unmount the shared directory, stop the MySQL service, and disable it at boot (Heartbeat must be the one to start it).
node1:
[root@node1 ~]# df
Filesystem                   1K-blocks    Used Available Use% Mounted on
/dev/mapper/VolGroup-lv_root  16134560 2786364  12528588  19% /
tmpfs                           506272       0    506272   0% /dev/shm
/dev/sda1                       495844   32418    437826   7% /boot
[root@node1 ~]# chkconfig mysqld off
[root@node1 ~]# chkconfig --list mysqld
mysqld          0:off   1:off   2:off   3:off   4:off   5:off   6:off
[root@node1 ~]# service mysqld status
ERROR! MySQL is not running
node2:
[root@node2 ~]# df
Filesystem                   1K-blocks    Used Available Use% Mounted on
/dev/mapper/VolGroup-lv_root  16134560 2786256  12528696  19% /
tmpfs                           506272       0    506272   0% /dev/shm
/dev/sda1                       495844   32418    437826   7% /boot
[root@node2 ~]# chkconfig mysqld off
[root@node2 ~]# chkconfig --list mysqld
mysqld          0:off   1:off   2:off   3:off   4:off   5:off   6:off
[root@node2 ~]# service mysqld status
ERROR! MySQL is not running
2. Start the heartbeat service on node1 and node2 and check the state
[root@node1 ~]# ssh node2 "service heartbeat restart"
Stopping High-Availability services: Done.
Waiting to allow resource takeover to complete:Done.
Starting High-Availability services: 2014/12/31_10:27:01 INFO:  Resource is stopped
Done.
[root@node1 ~]# service heartbeat restart
Stopping High-Availability services: Done.
Waiting to allow resource takeover to complete:Done.
Starting High-Availability services: INFO:  Resource is stopped
Done.
[root@node1 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:c7:14:97 brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.196/24 brd 255.255.255.255 scope global eth1
    inet 192.168.1.223/24 brd 255.255.255.255 scope global secondary eth1
    inet6 fe80::20c:29ff:fec7:1497/64 scope link
       valid_lft forever preferred_lft forever
[root@node1 ~]# df -H
Filesystem                    Size  Used Avail Use% Mounted on
/dev/mapper/VolGroup-lv_root   17G  2.9G   13G  19% /
tmpfs                         519M     0  519M   0% /dev/shm
/dev/sda1                     508M   34M  449M   7% /boot
192.168.1.198:/mydata         9.6G  272M  8.8G   4% /mydata
[root@node1 ~]# service mysqld status
SUCCESS! MySQL running (4853)
3. Client connection test
First: with the VIP on node1, log in from a client and verify access.
Then: stop the heartbeat service on node1, or simply power the machine off, and verify a client can still log in:
[root@node1 ~]# init 0
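In both checks the client should always connect to the VIP 192.168.1.223, never to a node address. A probe like this (client side; my addition, assuming a mysql client is installed there) should keep returning an answer before and after the failover, with @@hostname flipping from node1 to node2:

$ mysql -h 192.168.1.223 -u root -e "select @@hostname;"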
node2:
[root@node2 ha.d]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:ad:9f:36 brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.197/24 brd 255.255.255.255 scope global eth1
    inet 192.168.1.223/24 brd 255.255.255.255 scope global secondary eth1
    inet6 fe80::20c:29ff:fead:9f36/64 scope link
       valid_lft forever preferred_lft forever
[root@node2 ha.d]# df
Filesystem                   1K-blocks    Used Available Use% Mounted on
/dev/mapper/VolGroup-lv_root  16134560 2786276  12528676  19% /
tmpfs                           506272       0    506272   0% /dev/shm
/dev/sda1                       495844   32418    437826   7% /boot
192.168.1.198:/mydata          9289088  264704   8552512   4% /mydata
[root@node2 ha.d]# service mysqld status
SUCCESS! MySQL running (6494)
What this test demonstrates: whether you stop the heartbeat service on node1 or cut its power outright, clients can keep using the database system without interruption; and the same holds the other way around for node2.
The weak spot: Heartbeat makes the MySQL service itself highly available, so operations on node1 or node2 never take the database down, but if node3 fails, the entire database system becomes unusable. That reminded me of a technical article I had read about MFS (MooseFS), which reads like an upgraded NFS and removes NFS's single point of failure, going a long way toward fixing this architecture's shortcoming; I will cover MFS in a later post.
One point here is critically important, and it puzzled me for a long time:
NFS has four versions. V1 was used only inside Sun; V2 was the first publicly released version; V3 is the mainstream version today and the default shipped with RedHat 5.x; but on RedHat 6.x the default moved to V4. V4 adds a great deal on top of the earlier versions, and if you keep mounting the share the V3 way, you will walk into a trap that blocks the rest of this experiment. For background on NFSv4, see:
http://www.cyberciti.biz/faq/centos-fedora-rhel-nfs-v4-configuration/
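For reference, the difference shows up in the mount command itself; both sketches below point at the node3 export used throughout this post:

# mount -t nfs -o vers=3 192.168.1.198:/mydata /mydata   # force NFSv3
# mount -t nfs4 192.168.1.198:/mydata /mydata            # NFSv4, the form used on node1/node2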
If anything here falls short, I hope you will point it out. Thanks!