In this walkthrough we build a RabbitMQ cluster on Docker Swarm, focusing on Consul-based peer discovery for RabbitMQ. We will also add an HAProxy server farm to load-balance AMQP requests and improve the overall availability of the cluster. The final logical topology is shown below.
So, let's get started! 👊
Install the Docker Swarm cluster
| Hostname | IP |
|---|---|
| master | 192.168.99.128 |
| node1 | 192.168.99.133 |
| node2 | 192.168.99.134 |
Install Docker (run this on all three nodes)
curl -fsSL get.docker.com -o get-docker.sh
CHANNEL=stable sh get-docker.sh --mirror Aliyun
root@master:~# docker -v
Docker version 19.03.12, build 48a66213fe
Initialize the Docker Swarm cluster
- Initialize the cluster on master
#Init Docker Swarm - run on master
root@master:~# docker swarm init --advertise-addr 192.168.99.128
Swarm initialized: current node (htu24tq7mhip4ca7a9uqnndef) is now a manager.
To add a worker to this swarm, run the following command:
docker swarm join --token SWMTKN-1-1ifiwgcc2k4ztyr80zm6e3vj7ewbbi1fqfnba1vfisesnos6o4-es73sy0dx4ie5lbivx90mcbky 192.168.99.128:2377
To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.
- Join node1 to the cluster
root@node1:~# docker swarm join --token SWMTKN-1-1ifiwgcc2k4ztyr80zm6e3vj7ewbbi1fqfnba1vfisesnos6o4-es73sy0dx4ie5lbivx90mcbky 192.168.99.128:2377
This node joined a swarm as a worker.
root@master:~# docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION
htu24tq7mhip4ca7a9uqnndef * master.devopsman.cn Ready Active Leader 19.03.12
vlfb0ibx4k1z41r76yhgpummk node1.devopsman.cn Ready Active 19.03.12
root@master:~# docker node promote node1.devopsman.cn
Node node1.devopsman.cn promoted to a manager in the swarm.
- Join node2 to the cluster
root@node2:~# docker swarm join --token SWMTKN-1-1ifiwgcc2k4ztyr80zm6e3vj7ewbbi1fqfnba1vfisesnos6o4-es73sy0dx4ie5lbivx90mcbky 192.168.99.128:2377
This node joined a swarm as a worker.
root@master:~# docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION
htu24tq7mhip4ca7a9uqnndef * master.devopsman.cn Ready Active Leader 19.03.12
vlfb0ibx4k1z41r76yhgpummk node1.devopsman.cn Ready Active Reachable 19.03.12
99f5xkfepct301n48tqqslz22 node2.devopsman.cn Ready Active 19.03.12
root@master:~# docker node promote node2.devopsman.cn
Node node2.devopsman.cn promoted to a manager in the swarm.
root@master:~# docker network create --driver=overlay --attachable prod
q5rzw0i1xdh9kovytjeg0sj9p
root@master:~# docker network ls
NETWORK ID NAME DRIVER SCOPE
ba6f05d438cf bridge bridge local
ee47cd6a4247 docker_gwbridge bridge local
8df9d4ecb5be host host local
rl5o9jz6fxp8 ingress overlay swarm
25lk998untvk local overlay swarm
3a66aac75dbf none null local
q5rzw0i1xdh9 prod overlay swarm
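Every stack that follows attaches to this network, so it is worth confirming that it is swarm-scoped and attachable before moving on. A minimal check with docker network inspect (the -f Go template just extracts two fields from the inspect output):

# The network must be swarm-scoped and attachable, otherwise standalone
# containers and services on other nodes cannot share it.
docker network inspect prod -f 'scope={{.Scope}} attachable={{.Attachable}}'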
Deploy the Consul cluster
Key points
- Persist Consul's data to avoid data loss
- Deploy the Consul service in global mode and control scheduling with node labels
- Use two separate networks: one for internal Consul traffic, the other for communication between RabbitMQ and the Consul service
Consul deployment manifest
version: '3.6'
services:
  consul:
    image: consul:1.4.0
    hostname: "{{.Node.Hostname}}"
    networks:
      - consul
      - prod
    ports:
      - 8400:8400
      - 8500:8500
      - 8600:53
    volumes:
      - consul-data:/consul/data
    deploy:
      mode: global
      placement:
        constraints: [node.labels.consul == true]
    command: [ "agent", "-server", "-bootstrap-expect=3", "-retry-max=3", "-retry-interval=10s", "-datacenter=prod", "-join=consul", "-retry-join=consul", "-bind={{ GetInterfaceIP \"eth0\" }}", "-client=0.0.0.0", "-ui"]
networks:
  consul:
  prod:
    external: true
volumes:
  consul-data:
Initialize the Consul cluster
docker node update --label-add consul=true node1.devopsman.cn
node1.devopsman.cn
docker stack deploy -c docker-compose_consul.yaml consul
# wait 10 seconds
root@master:~/consul_rabbitmq_docker# docker node update --label-add consul=true master.devopsman.cn
master.devopsman.cn
root@master:~/consul_rabbitmq_docker# docker node update --label-add consul=true node2.devopsman.cn
node2.devopsman.cn
root@master:~/consul_rabbitmq_docker# curl 192.168.99.134:8500/v1/status/leader
"10.0.3.5:8300"
root@master:~/consul_rabbitmq_docker# curl 192.168.99.134:8500/v1/status/peers
["10.0.3.5:8300","10.0.3.7:8300","10.0.3.9:8300"]
root@master:~/consul_rabbitmq_docker#
Open http://192.168.99.128:8500 in a browser to reach the Consul dashboard and verify that the installation succeeded.
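The HTTP API allows the same verification to be scripted. A minimal sketch using two standard Consul v1 endpoints (any node's IP works, since port 8500 is published through the swarm ingress):

# All three swarm nodes should appear in the datacenter catalog.
curl -s 192.168.99.128:8500/v1/catalog/nodes
# Once RabbitMQ is deployed below, its nodes will register themselves
# here as well, under the peer discovery plugin's default service name.
curl -s 192.168.99.128:8500/v1/catalog/services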
Deploy the RabbitMQ cluster
Key points
- Persist data to prevent data loss.
- Deploy the RabbitMQ services in global mode and control scheduling with node labels.
- Use the prod network for internal/external RabbitMQ communication.
- Do not expose RABBITMQ_ERLANG_COOKIE and RABBITMQ_DEFAULT_PASS.
- Hostnames matter, because RabbitMQ uses the hostname as its data directory.
RabbitMQ deployment manifest
version: "3.6"
services:
rabbitmq-01:
image: olgac/rabbitmq:3.7.8-management
hostname: rabbitmq-01
environment:
- RABBITMQ_DEFAULT_USER=admin
- RABBITMQ_DEFAULT_PASS=Passw0rd
- RABBITMQ_ERLANG_COOKIE="MY-SECRET-KEY-123"
networks:
- prod
volumes:
- rabbitmq-01-data:/var/lib/rabbitmq
deploy:
mode: global
placement:
constraints: [node.labels.rabbitmq1 == true]rabbitmq-02:
image: olgac/rabbitmq:3.7.8-management
hostname: rabbitmq-02
environment:
- RABBITMQ_DEFAULT_USER=admin
- RABBITMQ_DEFAULT_PASS=Passw0rd
- RABBITMQ_ERLANG_COOKIE="MY-SECRET-KEY-123"
networks:
- prod
volumes:
- rabbitmq-02-data:/var/lib/rabbitmq
deploy:
mode: global
placement:
constraints: [node.labels.rabbitmq2 == true]rabbitmq-03:
image: olgac/rabbitmq:3.7.8-management
hostname: rabbitmq-03
environment:
- RABBITMQ_DEFAULT_USER=admin
- RABBITMQ_DEFAULT_PASS=Passw0rd
- RABBITMQ_ERLANG_COOKIE="MY-SECRET-KEY-123"
networks:
- prod
volumes:
- rabbitmq-03-data:/var/lib/rabbitmq
deploy:
mode: global
placement:
constraints: [node.labels.rabbitmq3 == true]networks:
prod:
external: true
volumes:
rabbitmq-01-data:
rabbitmq-02-data:
rabbitmq-03-data:
config/enabled_plugins
[rabbitmq_management,
rabbitmq_peer_discovery_consul,
rabbitmq_federation,
rabbitmq_federation_management,
rabbitmq_shovel,
rabbitmq_shovel_management].
config/rabbitmq.conf
loopback_users.admin = false
cluster_formation.peer_discovery_backend = rabbit_peer_discovery_consul
cluster_formation.consul.host = consul
cluster_formation.node_cleanup.only_log_warning = true
cluster_formation.consul.svc_addr_auto = true
cluster_partition_handling = autoheal

# Flow control is triggered if memory usage is above 80%.
vm_memory_high_watermark.relative = 0.8

# Flow control is triggered if free disk space drops below 5GB.
disk_free_limit.absolute = 5GB
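The olgac/rabbitmq image referenced in the manifest is expected to ship with these two files already baked in. If you would rather build your own image, here is a minimal sketch of the idea, assuming the upstream rabbitmq:3.7.8-management base image (which reads its configuration from /etc/rabbitmq):

# Hypothetical build script: copy the two config files shown above
# into the locations the official image reads them from.
cat > Dockerfile <<'EOF'
FROM rabbitmq:3.7.8-management
COPY config/enabled_plugins /etc/rabbitmq/enabled_plugins
COPY config/rabbitmq.conf /etc/rabbitmq/rabbitmq.conf
EOF
docker build -t my-rabbitmq:3.7.8-management .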
Initialize the RabbitMQ cluster
root@master:~/consul_rabbitmq_docker# docker node update --label-add rabbitmq1=true master.devopsman.cn
master.devopsman.cn
root@master:~/consul_rabbitmq_docker# docker node update --label-add rabbitmq2=true node1.devopsman.cn
node1.devopsman.cn
root@master:~/consul_rabbitmq_docker# docker node update --label-add rabbitmq3=true node2.devopsman.cn
node2.devopsman.cn
root@master:~/consul_rabbitmq_docker# docker stack deploy -c docker-compose_rabbitmq.yml rabbitmq
Creating service rabbitmq_rabbitmq-03
Creating service rabbitmq_rabbitmq-01
Creating service rabbitmq_rabbitmq-02
root@master:~/consul_rabbitmq_docker#
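Once the three services are running, peer discovery through Consul should have merged them into a single cluster. A quick sanity check from the master node (a sketch; the name filter relies on the container naming of the stack just deployed):

# Ask the local RabbitMQ node for the cluster membership; all three
# nodes (rabbit@rabbitmq-01 ... rabbit@rabbitmq-03) should be listed.
docker exec $(docker ps -q -f name=rabbitmq_rabbitmq) rabbitmqctl cluster_status
# Consul's catalog should show the same members registered under the
# plugin's default service name "rabbitmq".
curl -s 192.168.99.128:8500/v1/catalog/service/rabbitmq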
Configure the HAProxy proxy ports
HAProxy deployment manifest
version: "3.6"
services:
haproxy:
image: olgac/haproxy-for-rabbitmq:1.8.14-alpine
ports:
- 15672:15672
- 5672:5672
- 1936:1936
networks:
- prod
deploy:
mode: global
networks:
prod:
external: true
config/haproxy.cfg
global
    log 127.0.0.1 local0
    log 127.0.0.1 local1 notice
    maxconn 4096

defaults
    log global
    option tcplog
    option dontlognull
    timeout connect 6s
    timeout client 60s
    timeout server 60s

listen stats
    bind *:1936
    mode http
    stats enable
    stats hide-version
    stats realm Haproxy\ Statistics
    stats uri /

listen rabbitmq
    bind *:5672
    mode tcp
    server rabbitmq-01 rabbitmq-01:5672 check
    server rabbitmq-02 rabbitmq-02:5672 check
    server rabbitmq-03 rabbitmq-03:5672 check

listen rabbitmq-ui
    bind *:15672
    mode http
    server rabbitmq-01 rabbitmq-01:15672 check
    server rabbitmq-02 rabbitmq-02:15672 check
    server rabbitmq-03 rabbitmq-03:15672 check
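Deploy the HAProxy stack the same way as the previous ones (the stack file name below is an assumption, mirroring the earlier naming convention):

docker stack deploy -c docker-compose_haproxy.yml haproxy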
Verify the RabbitMQ cluster
Open http://192.168.99.128:15672 to reach the RabbitMQ management console, and log in with admin/Passw0rd.
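The same check works from the command line: /api/nodes is a standard endpoint of the rabbitmq_management plugin, and through HAProxy it answers on any swarm node:

# Every entry should report "running": true for its node.
curl -s -u admin:Passw0rd http://192.168.99.128:15672/api/nodes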
Finally, take a look at the HAProxy dashboard at http://192.168.99.128:1936.
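Because the stats URI is /, appending ;csv returns HAProxy's machine-readable stats, which makes backend health easy to script (column 18 of the CSV is the status field):

# The rabbitmq-0x servers in both listen sections should report UP.
curl -s 'http://192.168.99.128:1936/;csv' | cut -d, -f1,2,18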