2.4 配置负载均衡器
两台负载均衡器（nginx01、nginx02）的配置步骤相同
hostnamectl set-hostname nginx01 hostnamectl set-hostname nginx02 #将nginx.repo移到/etc/yum.repos.d/ yum -y install nginx #配置四层反向代理,配置在http外面 #使用轮询算法 vim /etc/nginx/nginx.conf stream{ upstream k8s-masters{ server 192.168.109.131:6443; server 192.168.109.134:6443; } server{ listen 6443; proxy_pass k8s-masters; } } #启动 systemctl enable --now nginx
负载均衡器高可用
#安装keepalived yum install -y keepalived #在/etc/keepalived目录下创建nginx检测脚本 cd /etc/keepalived/ vim check_nginx.sh #!/bin/bash #检测nginx是否启动了 A=`ps -C nginx --no-header |wc -l` if [ $A -eq 0 ];then #如果nginx没有启动就启动nginx systemctl start nginx #重启nginx if [ `ps -C nginx --no-header |wc -l` -eq 0 ];then #nginx重启失败,则停掉keepalived服务,进行VIP转移 killall keepalived fi fi #给脚本执行权限 chmod +x check_nginx.sh #修改keepalived配置文件 vim keepalived.conf ! Configuration File for keepalived global_defs { notification_email { acassen@firewall.loc failover@firewall.loc sysadmin@firewall.loc } notification_email_from Alexandre.Cassen@firewall.loc smtp_server 127.0.0.1 #修改邮箱地址 smtp_connect_timeout 30 router_id NGINX_01 #修改主备id #删掉这里的四行vrrp } #加入周期性检测nginx服务脚本的相关配置 vrrp_script check_nginx{ script "/etc/keepalived/check_nginx.sh" #心跳执行的脚本,检测nginx是否启动 interval 2 #(检测脚本执行的间隔,单位是秒) } vrrp_instance VI_1 { state MASTER interface ens33 #修改网卡名称 virtual_router_id 51 priority 100 #优先级,主不改,备改成比100小就行 advert_int 1 authentication { auth_type PASS auth_pass 1111 } virtual_ipaddress { 192.168.109.200 #修改VIP地址 } #添加跟踪(执行脚本) track_script{ check_nginx } } #重启服务 systemctl restart keepalived.service #备服务器下载好keepalived后,在主服务器上将脚本和keepalived配置文件传过去 [root@nginx01 keepalived]# scp * 192.168.109.135:`pwd` #传过去后修改三处 router_id NGINX_02 state BACKUP priority 90 #然后重启服务 systemctl restart keepalived.service
2.5 修改其他节点
# On both node machines, point bootstrap.kubeconfig, kubelet.kubeconfig
# and kube-proxy.kubeconfig at the VIP instead of a single master.
cd kubernetes/cfg/
vim bootstrap.kubeconfig
vim kubelet.kubeconfig
vim kube-proxy.kubeconfig
server: https://192.168.109.200:6443

# Restart the node services so they reconnect through the VIP.
systemctl restart kubelet kube-proxy

# On both master nodes, point the kubectl config at the VIP as well.
vim ~/.kube/config
server: https://192.168.109.200:6443
2.6 创建pod
2.6.1 kubectl创建pod
# Console transcript: no pods exist yet in the default namespace.
[root@master01 ~]# kubectl get pods
No resources found in default namespace.
# Create three nginx pods.
[root@master01 ~]# kubectl run stevelu01 --image=nginx:1.14
pod/stevelu01 created
[root@master01 ~]# kubectl run stevelu02 --image=nginx:1.14
pod/stevelu02 created
[root@master01 ~]# kubectl run stevelu03 --image=nginx:1.14
pod/stevelu03 created
# Watch them come up (the first is still pulling its image).
[root@master01 ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
stevelu01 0/1 ContainerCreating 0 18s
stevelu02 1/1 Running 0 10s
stevelu03 1/1 Running 0 6s
# -o wide also shows the pod IP and the node each pod was scheduled to.
[root@master01 ~]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
stevelu01 1/1 Running 0 35s 10.244.1.8 192.168.109.133 <none> <none>
stevelu02 1/1 Running 0 27s 10.244.0.10 192.168.109.132 <none> <none>
stevelu03 1/1 Running 0 23s 10.244.0.11 192.168.109.132 <none> <none>
# Create a pod from the centos image with an interactive terminal.
kubectl run centos01 -it --image=centos:7
并且可以看到pod运行的节点不一样,由调度算法决定
2.6.2 dashboard创建pod
总结
多master节点部署步骤
先部署master02等其它master节点
搭建 Nginx/HAProxy + Keepalived 高可用负载均衡，对接 master 节点群集
修改 node 节点上的 kubelet、kube-proxy 的 kubeconfig 配置文件，对接 VIP
kubectl的配置文件也要对接VIP或者当前节点的IP