Deploying a highly available Kubernetes 1.18.14 cluster with kubeadm (for other versions, simply replace the 1.18.14 version number)



Cluster roles

Role            IP Address        Hostname
master1         192.168.26.100    k8s-01
master2/node    192.168.26.120    k8s-02
master3/node    192.168.26.130    k8s-03
node            192.168.26.170    k8s-04
VIP             192.168.26.150


Run the following on the master at 192.168.26.100.


Here is a small script: we use master1 to set up passwordless SSH to the other nodes, then quickly sync the hosts file and set the hostnames.

##add the hosts entries first
cat >> /etc/hosts <<EOF 
192.168.26.100 k8s-01
192.168.26.120 k8s-02
192.168.26.130 k8s-03
192.168.26.170 k8s-04
EOF
[root@localhost ~]# yum -y install expect ##install on all 4 nodes
[root@localhost ~]# ssh-keygen ##press Enter through all prompts to generate the key pair
vim ssh-hosts.sh
#!/bin/bash
##replace the IP addresses and hostnames below with your own
##656768 is the root password; change it to your own as well
for i in 192.168.26.100 192.168.26.120 192.168.26.130 192.168.26.170 k8s-02 k8s-03 k8s-04;do
expect -c "
spawn ssh-copy-id -i /root/.ssh/id_rsa.pub root@$i
    expect {
        \"*yes/no*\" {send \"yes\r\"; exp_continue}
        \"*password*\" {send \"656768\r\"; exp_continue}
        \"*Password*\" {send \"656768\r\";}
    } "
done
for host_name in k8s-02 k8s-03 k8s-04;do
scp /etc/hosts $host_name:/etc/
done
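
After saving, run the script once from master1; a quick way to verify that passwordless login works afterwards (hostnames assumed from the table above):

bash ssh-hosts.sh
ssh k8s-02 hostname   ##should log in without a password prompt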


The script above has already synced the hosts file; next, set each node's hostname.

hostnamectl set-hostname k8s-01 ##set the local hostname first
ssh k8s-02  ##second node
hostnamectl set-hostname k8s-02
exit
ssh k8s-03  ##third node
hostnamectl set-hostname k8s-03
exit
ssh k8s-04  ##fourth node
hostnamectl set-hostname k8s-04
exit
Back on the .100 node, ping each host to verify that everything resolves correctly:
[root@k8s-01 ~]# ping k8s-01
PING k8s-01 (192.168.26.100) 56(84) bytes of data.
64 bytes from k8s-01 (192.168.26.100): icmp_seq=1 ttl=64 time=0.020 ms
[root@k8s-01 ~]# ping k8s-02
PING k8s-02 (192.168.26.120) 56(84) bytes of data.
64 bytes from k8s-02 (192.168.26.120): icmp_seq=1 ttl=64 time=0.252 ms
[root@k8s-01 ~]# ping k8s-03
PING k8s-03 (192.168.26.130) 56(84) bytes of data.
64 bytes from k8s-03 (192.168.26.130): icmp_seq=1 ttl=64 time=0.234 ms
[root@k8s-01 ~]# ping k8s-04
PING k8s-04 (192.168.26.170) 56(84) bytes of data.
64 bytes from k8s-04 (192.168.26.170): icmp_seq=1 ttl=64 time=0.151 ms


OK, the initial environment is ready; now adjust the system configuration on every node.


1. Configure the system environment (run on all machines)

The following steps are executed on every node by default.

##each step can be typed by hand, or you can put everything into a script and scp it to every machine for automated deployment
vim k8s_init.sh
#!/bin/bash
##install required packages
yum install -y epel-release conntrack ipvsadm ipset jq sysstat curl iptables libseccomp
wait 
##stop and disable firewalld
systemctl stop firewalld && systemctl disable firewalld
wait
##flush iptables rules
iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat && iptables -P FORWARD ACCEPT
wait
##disable SELinux
setenforce 0
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
wait
##disable the swap partition
swapoff -a
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
wait
##load the kernel modules needed for IPVS and bridged traffic
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
modprobe -- br_netfilter
EOF
wait
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules
wait
##kernel/system parameters
cat << EOF | tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
wait
sysctl -p /etc/sysctl.d/k8s.conf
##install the Docker environment
yum remove -y docker \
           docker-client \
           docker-client-latest \
           docker-common \
           docker-latest \
           docker-latest-logrotate \
           docker-logrotate \
           docker-selinux \
           docker-engine-selinux \
           docker-engine
wait
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum makecache fast
yum list docker-ce --showduplicates | sort -r
yum -y install docker-ce-18.09.9-3.el7
systemctl start docker
systemctl enable docker
sed -i "13i ExecStartPost=/usr/sbin/iptables -P FORWARD ACCEPT" /usr/lib/systemd/system/docker.service
wait
##configure the registry mirror (accelerator)
tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://bk6kzfqm.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF
wait
##restart docker
systemctl daemon-reload
systemctl restart docker
wait
#install the k8s components
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
#rebuild the yum cache; enter y if asked to accept the repository keys
wait
yum makecache fast
wait
yum install -y kubelet-1.18.14 kubectl-1.18.14 kubeadm-1.18.14
wait
##configure the container runtime and kubelet to use systemd as the cgroup driver
echo "KUBELET_EXTRA_ARGS=--cgroup-driver=systemd" > /etc/sysconfig/kubelet
wait
systemctl daemon-reload
systemctl restart kubelet
systemctl enable --now kubelet
systemctl status kubelet
#install the bash auto-completion package
yum install bash-completion -y
#enable kubectl and kubeadm command completion; takes effect on next login
kubectl completion bash > /etc/bash_completion.d/kubectl
kubeadm completion bash > /etc/bash_completion.d/kubeadm
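
If you prefer to run the script remotely from master1 instead of typing each step, a rough sketch using the passwordless SSH set up earlier (host list assumed from the table above):

bash k8s_init.sh   ##run locally on k8s-01 first
for h in k8s-02 k8s-03 k8s-04; do
  scp k8s_init.sh $h:/root/
  ssh $h "bash /root/k8s_init.sh"
done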


2. Configure high availability on the master nodes (run on all 3 masters)

1. Install and configure keepalived and haproxy

yum install keepalived haproxy -y
##edit the configuration files
##configuration for k8s-01
vim /etc/keepalived/keepalived.conf
global_defs {
    router_id LVS_DEVEL_01   # this value must be unique per node
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"   # health-check script
    interval 8
    weight -5
    fall 3
    rise 2
}
vrrp_instance VI_1 {
    state MASTER             # primary node
    interface eth0           # network interface name
    mcast_src_ip 192.168.26.100   # this node's IP
    virtual_router_id 51     # must be identical on all nodes
    priority 150             # priority; the primary must be higher than the backups
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.26.150/24    # virtual IP
    }
    track_script {
       chk_apiserver
    }
}


k8s-02 and k8s-03 are configured similarly:

#vi /etc/keepalived/keepalived.conf
global_defs {
    router_id LVS_DEVEL_02
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh" 
    interval 8
    weight -5
    fall 3  
    rise 2
}
vrrp_instance VI_1 {
    state BACKUP             # backup node
    interface eth0
    mcast_src_ip 192.168.26.120
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.26.150/24
    }
    track_script {
       chk_apiserver
    }
}
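
k8s-03 uses the same layout again, only with its own identifiers and a lower priority (illustrative values; any priority below the other two nodes works):

    router_id LVS_DEVEL_03
    mcast_src_ip 192.168.26.130
    priority 80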


2. Create the health-check script on all 3 masters

#vi /etc/keepalived/check_apiserver.sh
#!/bin/bash
err=0
for k in $(seq 1 5)
do
    check_code=$(pgrep kube-apiserver)
    if [[ $check_code == "" ]]; then
        err=$(expr $err + 1)
        sleep 5
        continue
    else
        err=0
        break
    fi
done
if [[ $err != "0" ]]; then
    echo "systemctl stop keepalived"
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi
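
Make the script executable on all three masters so keepalived can invoke it from the vrrp_script block defined above:

chmod +x /etc/keepalived/check_apiserver.sh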


3. Configure haproxy.cfg (run on all 3 masters)

cat > /etc/haproxy/haproxy.cfg << EOF
#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    # 1) configure syslog to accept network log events.  This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #   file. A line like the following can be added to
    #   /etc/sysconfig/syslog
    #
    #    local2.*                       /var/log/haproxy.log
    #
    log         127.0.0.1 local2
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon 
    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats
#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------  
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000
#---------------------------------------------------------------------
# kubernetes apiserver frontend which proxys to the backends
#--------------------------------------------------------------------- 
frontend kubernetes-apiserver
    mode                 tcp
    bind                 *:16443
    option               tcplog
    default_backend      kubernetes-apiserver    
#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend kubernetes-apiserver
    mode        tcp
    balance     roundrobin
    server      k8s-01           192.168.26.100:6443  check
    server      k8s-02           192.168.26.120:6443  check
    server      k8s-03           192.168.26.130:6443  check
#---------------------------------------------------------------------
# collection haproxy statistics message
#---------------------------------------------------------------------
listen stats
    bind                 *:1080
    stats auth           admin:awesomePassword
    stats refresh        5s
    stats realm          HAProxy\ Statistics
    stats uri            /admin?stats
EOF
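
Optionally, the configuration syntax can be validated before starting the service (haproxy's built-in check mode):

haproxy -c -f /etc/haproxy/haproxy.cfg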


4. Start and check both services

systemctl enable keepalived.service
systemctl start keepalived.service
systemctl status keepalived.service
ip a s eth0
[root@k8s-01 ~]# ip a s eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:bf:bf:a5 brd ff:ff:ff:ff:ff:ff
    inet 192.168.26.100/24 brd 192.168.26.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
    inet 192.168.26.150/24 scope global secondary eth0
       valid_lft forever preferred_lft forever
systemctl enable haproxy
systemctl start haproxy
systemctl status haproxy
ss -anput |grep haproxy
[root@k8s-01 ~]# ss -anput |grep haproxy
udp    UNCONN     0      0         *:56573                 *:*                   users:(("haproxy",pid=69064,fd=6),("haproxy",pid=69063,fd=6))
tcp    LISTEN     0      128       *:1080                  *:*                   users:(("haproxy",pid=69064,fd=7))
tcp    LISTEN     0      128       *:16443                 *:*                   users:(("haproxy",pid=69064,fd=5))


3. Initialize the cluster (run on k8s-01)

kubeadm init \
  --control-plane-endpoint "192.168.26.150:16443" \
  --kubernetes-version "1.18.14" \
  --pod-network-cidr "10.0.0.0/8" \
  --service-cidr "172.16.0.0/16" \
  --token "abcdef.0123456789abcdef" \
  --token-ttl "0" \
  --image-repository registry.aliyuncs.com/google_containers \
  --upload-certs
##the multi-line command above may pick up formatting problems when copied; use the single-line version below for pasting
kubeadm init --control-plane-endpoint "192.168.26.150:16443" --kubernetes-version "1.18.14" --pod-network-cidr "10.0.0.0/8" --service-cidr "172.16.0.0/16" --token "abcdef.0123456789abcdef" --token-ttl "0" --image-repository registry.aliyuncs.com/google_containers --upload-certs
##the init command pulls the cluster images; how long it takes depends on your network, so be patient
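
If you want to download the control-plane images ahead of time, kubeadm provides a pull subcommand; something like the following should work with the repository used above:

kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.18.14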


Parameter explanation:

--control-plane-endpoint: a fixed virtual IP (and port) for the control plane. It should match the load-balancer VIP; if the load balancer runs on the same host as a master, use a port other than 6443.

--kubernetes-version: the Kubernetes version to install.

--pod-network-cidr: the IP range used for the pod network.

--service-cidr: the IP range used for service virtual IPs.

--token: used to establish mutual trust between the control plane and joining nodes.

--token-ttl: the token expiry time; "0" means it never expires.

--image-repository: the registry to pull the control-plane images from.

--upload-certs: upload the control-plane certificates to the kubeadm-certs Secret.

##output like the following indicates success
##if it fails, check that the VIP is actually up
##run kubeadm reset and then re-run the initialization if needed
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
##this part configures kubectl to manage the cluster (any machine with kubectl installed can be set up this way)
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config ##if admin.conf is missing, scp a copy from a master node
  sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
 https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of the control-plane node running the following command on each as root:
##this command joins additional master nodes
  kubeadm join 192.168.26.150:16443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:e7fd94ec50ce57faea088f3358808f7155ffab364cac8ee2a4c8e66a8f7b6f8f \
    --control-plane --certificate-key 0b6faa9d215fd51d97587ece45480abe58b88607c8cb776fc85d0edd354c8dab
Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
Then you can join any number of worker nodes by running the following on each as root:
##this command joins worker nodes
kubeadm join 192.168.26.150:16443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:e7fd94ec50ce57faea088f3358808f7155ffab364cac8ee2a4c8e66a8f7b6f8f
##configure kubectl access
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config


Master nodes join with the control-plane command above; worker nodes copy the plain join command.


4. Join the remaining nodes to the cluster

k8s-02

kubeadm join 192.168.26.150:16443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:e7fd94ec50ce57faea088f3358808f7155ffab364cac8ee2a4c8e66a8f7b6f8f \
    --control-plane --certificate-key 0b6faa9d215fd51d97587ece45480abe58b88607c8cb776fc85d0edd354c8dab
##configure kubectl access
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config


k8s-03

kubeadm join 192.168.26.150:16443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:e7fd94ec50ce57faea088f3358808f7155ffab364cac8ee2a4c8e66a8f7b6f8f \
    --control-plane --certificate-key 0b6faa9d215fd51d97587ece45480abe58b88607c8cb776fc85d0edd354c8dab
##configure kubectl access
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config


k8s-04 (worker node)

kubeadm join 192.168.26.150:16443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:e7fd94ec50ce57faea088f3358808f7155ffab364cac8ee2a4c8e66a8f7b6f8f


If you also want to manage the cluster from a worker node, configure kubectl there in the same way as above.


I will skip that here.


Check the cluster status: NotReady is expected at this point, because the pod network has not been configured yet.
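
A quick check before installing the network addon (the STATUS column stays NotReady until Calico is applied in the next step):

kubectl get nodes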


5. Configure the Calico network (run on k8s-01)

wget https://docs.projectcalico.org/v3.15/manifests/calico.yaml
vim  calico.yaml
## search for the IP 192 (around line 3580), change it to the pod CIDR used at init, remove the comment marks, then save and exit


The result after the change:
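
A rough sketch of the modified block, assuming the CALICO_IPV4POOL_CIDR variable in the calico-node DaemonSet, uncommented and set to the pod CIDR passed to kubeadm init:

            - name: CALICO_IPV4POOL_CIDR
              value: "10.0.0.0/8"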

kubectl apply -f calico.yaml ##wait for the calico pods to start; the images need to be downloaded first
kubectl get nodes  ##check the cluster network state
$ kubectl get nodes
NAME     STATUS   ROLES    AGE     VERSION
k8s-01   Ready    master   19m     v1.18.14
k8s-02   Ready    master   14m     v1.18.14
k8s-03   Ready    master   14m     v1.18.14
k8s-04   Ready    <none>   6m30s   v1.18.14


6. Relabel the nodes and remove the taints so the master nodes can run pods

kubectl taint nodes <node-name> node-role.kubernetes.io/master-  ##remove the master taint
kubectl label nodes <node-name> node-role.kubernetes.io/node=    ##label the node
kubectl get nodes  ##check the cluster state
NAME     STATUS   ROLES         AGE     VERSION
k8s-01   Ready    master        19m     v1.18.14
k8s-02   Ready    master,node   14m     v1.18.14
k8s-03   Ready    master,node   14m     v1.18.14
k8s-04   Ready    node          6m30s   v1.18.14
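
Applied to this cluster, the commands would look like the following (illustrative; k8s-02 and k8s-03 get both roles, k8s-04 is a plain worker):

kubectl taint nodes k8s-02 node-role.kubernetes.io/master-
kubectl taint nodes k8s-03 node-role.kubernetes.io/master-
kubectl label nodes k8s-02 node-role.kubernetes.io/node=
kubectl label nodes k8s-03 node-role.kubernetes.io/node=
kubectl label nodes k8s-04 node-role.kubernetes.io/node=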