1 Test Topology
To test LVS high availability, one more LVS server must be added to the topology, and ipvsadm must be installed on that server.
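A minimal install sketch for that step, assuming the new LVS server also runs CentOS with yum available (the command itself is not shown in the original steps):

yum install -y ipvsadm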
2 Installing and Configuring Keepalived
2.1 Installing Keepalived
Keepalived must be installed on both LVS servers. The install command is:
yum install -y keepalived
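To confirm the package is in place, the installed version can be printed; this is an optional sanity check, not part of the original steps:

keepalived -v
rpm -qa | grep keepalived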
After Keepalived is installed, a keepalived.conf configuration file can be found in the /etc/keepalived directory, with the following content:
! Configuration File for keepalived

global_defs {
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
   vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

# The configuration above can be left as-is; focus on the configuration below, which usually needs changes
vrrp_instance VI_1 {
    state MASTER           # marks this LVS as the master; set MASTER or BACKUP according to your server plan
    interface eth0         # NIC through which this LVS server provides service; change to the actual NIC name
    virtual_router_id 51   # ID of the VRRP group this LVS belongs to; no need to change for now
    priority 100           # priority of this LVS server; the master has the highest value, the backup must be lower
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    # virtual_ipaddress binds the VIP to a NIC of the LVS server; it usually needs to be modified
    # example: 192.168.25.100/24 dev ens33 label ens33:9
    virtual_ipaddress {
        192.168.200.16
        192.168.200.17
        192.168.200.18
    }
}

# LVS service policy, equivalent to: ipvsadm -A -t 192.168.25.100:80 -s rr ; usually needs to be modified
virtual_server 192.168.200.100 443 {
    delay_loop 6
    lb_algo rr              # LVS scheduling algorithm, round-robin by default
    lb_kind NAT             # LVS forwarding mode; can be changed to DR
    persistence_timeout 50  # how long the same client keeps being sent to the RS that served its first request;
                            # set this to 0 here so the round-robin effect is visible
    protocol TCP            # TCP protocol

    # RS definition, equivalent to: ipvsadm -a -t 192.168.25.100:80 -r 192.168.25.112 -g
    real_server 192.168.201.100 443 {
        weight 1            # weight of this RS
        SSL_GET {           # SSL_GET health check; usually changed to HTTP_GET
            # one of the two url blocks can be deleted; inside url, use "path /" and "status_code 200" and remove digest
            url {
              path /
              digest ff20ad2481f97b1754ef3e12ecd3a9cc
            }
            url {
              path /mrtg/
              digest 9b3a0c85a887a256d6939da88aabd8cd
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}

# The configuration below defines two more LVS services; the settings have the same meaning as above.
# If they are not needed, everything below can be deleted.
virtual_server 10.10.10.2 1358 {
    delay_loop 6
    lb_algo rr
    lb_kind NAT
    persistence_timeout 50
    protocol TCP

    sorry_server 192.168.200.200 1358

    real_server 192.168.200.2 1358 {
        weight 1
        HTTP_GET {
            url {
              path /testurl/test.jsp
              digest 640205b7b0fc66c1ea91c463fac6334d
            }
            url {
              path /testurl2/test.jsp
              digest 640205b7b0fc66c1ea91c463fac6334d
            }
            url {
              path /testurl3/test.jsp
              digest 640205b7b0fc66c1ea91c463fac6334d
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }

    real_server 192.168.200.3 1358 {
        weight 1
        HTTP_GET {
            url {
              path /testurl/test.jsp
              digest 640205b7b0fc66c1ea91c463fac6334c
            }
            url {
              path /testurl2/test.jsp
              digest 640205b7b0fc66c1ea91c463fac6334c
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}

virtual_server 10.10.10.3 1358 {
    delay_loop 3
    lb_algo rr
    lb_kind NAT
    persistence_timeout 50
    protocol TCP

    real_server 192.168.200.4 1358 {
        weight 1
        HTTP_GET {
            url {
              path /testurl/test.jsp
              digest 640205b7b0fc66c1ea91c463fac6334d
            }
            url {
              path /testurl2/test.jsp
              digest 640205b7b0fc66c1ea91c463fac6334d
            }
            url {
              path /testurl3/test.jsp
              digest 640205b7b0fc66c1ea91c463fac6334d
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }

    real_server 192.168.200.5 1358 {
        weight 1
        HTTP_GET {
            url {
              path /testurl/test.jsp
              digest 640205b7b0fc66c1ea91c463fac6334d
            }
            url {
              path /testurl2/test.jsp
              digest 640205b7b0fc66c1ea91c463fac6334d
            }
            url {
              path /testurl3/test.jsp
              digest 640205b7b0fc66c1ea91c463fac6334d
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
2.2 Configuring Keepalived
Based on the configuration file above, the test topology, and the server plan, modify keepalived.conf on each of the two LVS servers as follows:
LVS master server
! Configuration File for keepalived

global_defs {
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
   vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VI_1 {
    state MASTER        # master server
    interface ens33
    virtual_router_id 51
    priority 100        # higher priority
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.25.100/24 dev ens33 label ens33:9
    }
}

virtual_server 192.168.25.100 80 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    persistence_timeout 0
    protocol TCP

    real_server 192.168.25.112 80 {
        weight 1
        HTTP_GET {
            url {
              path /
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }

    real_server 192.168.25.113 80 {
        weight 1
        HTTP_GET {
            url {
              path /
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
LVS backup server
! Configuration File for keepalived

global_defs {
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
   vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VI_1 {
    state BACKUP        # backup server
    interface ens33
    virtual_router_id 51
    priority 50         # lower priority than the master
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.25.100/24 dev ens33 label ens33:9
    }
}

virtual_server 192.168.25.100 80 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    persistence_timeout 0
    protocol TCP

    real_server 192.168.25.112 80 {
        weight 1
        HTTP_GET {
            url {
              path /
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }

    real_server 192.168.25.113 80 {
        weight 1
        HTTP_GET {
            url {
              path /
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
Note: in the configuration file there must be a space between a keyword and the opening brace that follows it.
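As an optional sanity check (not part of the original steps), the only fields that should differ between the two servers are state and priority; a quick grep on each machine makes this easy to verify:

grep -E 'state|priority' /etc/keepalived/keepalived.conf
# master: state MASTER / priority 100
# backup: state BACKUP / priority 50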
2.3 Starting Keepalived
Start Keepalived on both LVS servers with the following command:
service keepalived start
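On systemd-based systems such as CentOS 7, the equivalent systemctl commands can also be used; enabling the service at boot is an optional extra step not covered above:

systemctl start keepalived
systemctl status keepalived
systemctl enable keepalived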
3 High Availability Tests
3.1 Checking the Test Environment
After the steps above are complete, run ifconfig on both the LVS master and backup servers; you should see that the VIP is bound to the master server, as shown below:
[root@lvs01 keepalived]# ifconfig
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.25.110  netmask 255.255.255.0  broadcast 192.168.25.255
        inet6 fe80::64ba:dea0:c4c3:6593  prefixlen 64  scopeid 0x20<link>
        inet6 fe80::fe9:a7ce:86ef:28b0  prefixlen 64  scopeid 0x20<link>
        inet6 fe80::c569:ba05:f195:be69  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:b6:e4:aa  txqueuelen 1000  (Ethernet)
        RX packets 18340  bytes 8096780 (7.7 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 26632  bytes 2234108 (2.1 MiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

ens33:9: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.25.100  netmask 255.255.255.0  broadcast 0.0.0.0
        ether 00:0c:29:b6:e4:aa  txqueuelen 1000  (Ethernet)
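Keepalived also creates the LVS forwarding rules automatically, so ipvsadm on the master should now list the virtual server and both RS entries; this check is an addition to the original steps and the exact output depends on your environment:

ipvsadm -Ln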
With this in place, the client can send requests to the VIP 192.168.25.100 for testing.
3.2 Testing Load Balancing
Send requests from the client to test load balancing:
[root@client ~]# curl 192.168.25.100
this is RS02
[root@client ~]# curl 192.168.25.100
this is RS01
[root@client ~]# curl 192.168.25.100
this is RS02
[root@client ~]# curl 192.168.25.100
this is RS01
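To fire several requests in one go instead of typing curl repeatedly, a simple shell loop can be used (an illustrative sketch, not from the original):

for i in $(seq 1 6); do curl 192.168.25.100; done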
3.3 Testing RS High Availability
Shut down one RS (the command ifconfig <NIC name> down can be used to temporarily bring its NIC down), then keep sending requests from the client and check whether access still works:
[root@client ~]# curl 192.168.25.100
this is RS01
[root@client ~]# curl 192.168.25.100
this is RS01
[root@client ~]# curl 192.168.25.100
this is RS01
The client can still access the service, but only RS1 is serving the requests. This shows that Keepalived detected the failure of the RS2 server and removed it from the server pool.
Now start the RS2 server again and keep accessing from the client. The responses below show that Keepalived detected that RS2 has recovered and added it back to the server pool.
[root@client ~]# curl 192.168.25.100
this is RS01
[root@client ~]# curl 192.168.25.100
this is RS02
[root@client ~]# curl 192.168.25.100
this is RS01
[root@client ~]# curl 192.168.25.100
this is RS02
3.4 Testing LVS High Availability
Two tests are performed here:
Testing LVS master server failure
Use the command ifconfig <NIC name> down to shut down the master server's NIC, so that the master can no longer provide service. Then check whether the backup server binds the VIP to itself and whether the client can still access the service normally. As follows:
Shut down the master server's NIC
[root@lvs01 keepalived]# ifconfig ens33 down
On the backup server you will find that the VIP has been bound there. This is Keepalived detecting the master's failure and performing the failover and switchover automatically.
[root@lvs02 keepalived]# ifconfig
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.25.111  netmask 255.255.255.0  broadcast 192.168.25.255
        inet6 fe80::64ba:dea0:c4c3:6593  prefixlen 64  scopeid 0x20<link>
        inet6 fe80::fe9:a7ce:86ef:28b0  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:f7:eb:69  txqueuelen 1000  (Ethernet)
        RX packets 27793  bytes 17906988 (17.0 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 14632  bytes 1414979 (1.3 MiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

ens33:9: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.25.100  netmask 255.255.255.0  broadcast 0.0.0.0
        ether 00:0c:29:f7:eb:69  txqueuelen 1000  (Ethernet)
Check whether the client can still access the service normally
[root@client ~]# curl 192.168.25.100
this is RS02
[root@client ~]# curl 192.168.25.100
this is RS01
[root@client ~]# curl 192.168.25.100
this is RS02
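To watch the VRRP advertisements that drive this failover, tcpdump can be run on the backup server; this is an optional observation, assuming tcpdump is installed (112 is the VRRP protocol number):

tcpdump -i ens33 ip proto 112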
Testing LVS master server recovery
After the test above passes, bring the master server's NIC back up so that it can provide service again, then check whether the VIP moves back to the master server.
Bring the master server's NIC back up
ifconfig ens33 up
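The state transition can also be followed in the Keepalived logs; on CentOS, Keepalived typically logs through syslog to /var/log/messages (the path and message wording are assumptions about the default setup):

tail -f /var/log/messages | grep -i keepalived
# the master is expected to log a transition back to the MASTER state when it reclaims the VIP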
Check the master and backup servers
Master server
[root@lvs01 ~]# ifconfig
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.25.110  netmask 255.255.255.0  broadcast 192.168.25.255
        inet6 fe80::64ba:dea0:c4c3:6593  prefixlen 64  scopeid 0x20<link>
        inet6 fe80::fe9:a7ce:86ef:28b0  prefixlen 64  scopeid 0x20<link>
        inet6 fe80::c569:ba05:f195:be69  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:b6:e4:aa  txqueuelen 1000  (Ethernet)
        RX packets 19691  bytes 8264423 (7.8 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 29148  bytes 2426835 (2.3 MiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

ens33:9: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.25.100  netmask 255.255.255.0  broadcast 0.0.0.0
        ether 00:0c:29:b6:e4:aa  txqueuelen 1000  (Ethernet)
Backup server
[root@lvs02 keepalived]# ifconfig
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.25.111  netmask 255.255.255.0  broadcast 192.168.25.255
        inet6 fe80::64ba:dea0:c4c3:6593  prefixlen 64  scopeid 0x20<link>
        inet6 fe80::fe9:a7ce:86ef:28b0  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:f7:eb:69  txqueuelen 1000  (Ethernet)
        RX packets 28293  bytes 17968506 (17.1 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 15420  bytes 1479137 (1.4 MiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 1  (Local Loopback)
        RX packets 31  bytes 2720 (2.6 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 31  bytes 2720 (2.6 KiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0