前言
- v1.24.0 至 v1.26.0 之间 docker 仍可使用,但是需要额外安装 cri-dockerd 来充当垫片
- 截至本文更新时,v1.26.0 暂时无法通过 docker 方式部署,因此本次采用 containerd
- 由于工作原因作者会同时使用Ubuntu和CentOS,因此本次将两个系统的K8S安装一起记录一下
- 使用kubeadm部署3主2从高可用集群
- etcd采用二进制部署,复用3个管理节点
- 本次部署无vip,采用nginx做负载均衡
理想拓扑图如下
准备工作
准备5台虚拟机
建议所有虚拟机使用相同的操作系统,并提前配置好正确的IP地址
初始化操作
大部分步骤只需要在管理节点1操作
记录一下规划的配置文件,避免后面写错
cat <<EOF > /opt/k8s_env.sh #!/bin/bash # k8s节点网段,方便做chronyd对时 NODEIPS=192.168.3.0/24 # k8s集群所有节点 HOSTS=(master1 master2 master3 node1 node2) # k8s管理节点 MASTERS=(master1 master2 master3) # k8s工作节点 WORKS=(master1 master2 master3 node1 node2) # 每个节点对应的IP地址 master1=192.168.3.201 master2=192.168.3.202 master3=192.168.3.203 node1=192.168.3.204 node2=192.168.3.205 # 节点root密码,方便脚本自动免密 export SSHPASS=1 # 配置kubectl自动补全 #source <(kubeadm completion bash) #source <(kubectl completion bash) #source <(crictl completion bash) # 服务网段(Service CIDR),部署前路由不可达,部署后集群内部使用IP:Port可达 SERVICE_CIDR="10.100.0.0/16" # clusterDNS地址,部署前路由不可达,部署后集群内部使用IP:Port可达,需要在Service CIDR中可达,一般建议选用第10个地址 CLUSTER_KUBERNETES_SVC_IP="10.100.0.10" # Pod 网段(Cluster CIDR),部署前路由不可达,部署后路由可达(flanneld 保证) CLUSTER_CIDR="172.31.0.0/16" # 服务端口范围(NodePort Range) NODE_PORT_RANGE="30000-40000" # etcd集群服务地址列表(默认复用3个master节点) ETCD_ENDPOINTS="https://\$master1:2379,https://\$master2:2379,https://\$master3:2379" # etcd集群服务地址列表(默认复用3个master节点) ETCD_CLUSTERS="master1=https://\$master1:2380,master2=https://\$master2:2380,master3=https://\$master3:2380" EOF
Centos
配置yum源
- 配置基础yum源为阿里源(后续安装基础软件包)
# Back up the stock repo files, then switch Base and EPEL to the Huawei
# Cloud mirror (used for all later package installs).
repo_bak=/opt/yum_bak
mkdir "$repo_bak" && mv /etc/yum.repos.d/* "$repo_bak"/
curl -o /etc/yum.repos.d/CentOS-Base.repo https://repo.huaweicloud.com/repository/conf/CentOS-7-reg.repo
yum -y install epel-release
# Point epel.repo at the mirror: enable baseurl, disable metalink and
# rewrite the Fedora download URL — all three edits in a single pass.
sed -i \
  -e "s/#baseurl/baseurl/g" \
  -e "s/metalink/#metalink/g" \
  -e "s@https\?://download.fedoraproject.org/pub@https://repo.huaweicloud.com@g" \
  /etc/yum.repos.d/epel.repo
# Aliyun alternative (seems rate-limited lately, so Huawei is preferred):
# curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
# curl -o /etc/yum.repos.d/epel.repo https://mirrors.aliyun.com/repo/epel-7.repo
- 添加启用源(后续更新内核)
# Add the ELRepo repository (source of the mainline kernel-ml package)
# and redirect it to the Tsinghua mirror.
elrepo_file=/etc/yum.repos.d/elrepo.repo
yum install https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm -y
sed -i \
  -e "s@mirrorlist@#mirrorlist@g" \
  -e "s@elrepo.org/linux@mirrors.tuna.tsinghua.edu.cn/elrepo@g" \
  "$elrepo_file"
- 添加docker源(用于安装containerd)
# Docker CE yum repo — only needed as the source of the containerd.io
# package; docker itself is not installed.
docker_repo=/etc/yum.repos.d/docker-ce.repo
yum install -y yum-utils device-mapper-persistent-data lvm2
curl -o "$docker_repo" https://repo.huaweicloud.com/docker-ce/linux/centos/docker-ce.repo
# Rewrite the upstream URL to the Huawei mirror.
sed -i 's+download.docker.com+repo.huaweicloud.com/docker-ce+' "$docker_repo"
- 添加k8s源(用于安装kubeadm、kubelet、kubectl)
# Kubernetes yum repo (Huawei mirror) providing kubeadm/kubelet/kubectl.
# \$basearch is escaped so it reaches the repo file literally and yum
# expands it at run time.
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://repo.huaweicloud.com/kubernetes/yum/repos/kubernetes-el7-\$basearch
enabled=1
gpgcheck=1
repo_gpgcheck=0
gpgkey=https://repo.huaweicloud.com/kubernetes/yum/doc/yum-key.gpg https://repo.huaweicloud.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# Tip: if your yum cannot resolve the $basearch variable, replace it in the
# file above with the real architecture (aarch64/armhfp/ppc64le/s390x/x86_64).
- 建立yum缓存
# Drop stale metadata and rebuild the yum cache against the new mirrors;
# only rebuild if the clean succeeded (same short-circuit as `&&`).
if yum clean all; then
  yum makecache fast
fi
配置免密、修改hostname、关闭防火墙、selinux、关闭swap分区(方便后面进行其它操作)
- 修改hosts文件
# Append an "<ip> <hostname>" entry for every cluster node to /etc/hosts.
# Edit /opt/k8s_env.sh first — this reads the per-host IP variables from it.
source /opt/k8s_env.sh
for host in "${HOSTS[@]}"; do
  # ${!host} is bash indirect expansion: the value of the variable named by
  # $host (host=master1 -> $master1). Replaces the fragile eval-based lookup
  # `$(eval echo "$"$host)`, which re-parses the value through the shell.
  echo "${!host} $host" >> /etc/hosts
done
- 修改hostname、关闭防火墙、selinux、关闭swap分区
# Push SSH keys to every node, then per-node base setup: hostname,
# firewalld off, SELinux disabled, swap disabled, /etc/hosts synced.
# Fix: quote "${HOSTS[@]}" so entries are never word-split or globbed.
source /opt/k8s_env.sh
yum -y install sshpass
ssh-keygen -t rsa -b 2048 -P "" -f /root/.ssh/id_rsa -q
for host in "${HOSTS[@]}"; do
  # sshpass -e reads the root password from $SSHPASS (exported by k8s_env.sh)
  #sshpass -p 1 ssh-copy-id -o StrictHostKeyChecking=no $host
  sshpass -e ssh-copy-id -o StrictHostKeyChecking=no $host
  ssh $host "hostnamectl set-hostname $host"
  ssh $host "systemctl disable --now firewalld"
  # setenforce takes effect now; the sed makes it permanent across reboots
  ssh $host "setenforce 0"
  ssh $host "sed -ri '/^SELINUX=/cSELINUX=disabled' /etc/selinux/config"
  # comment out swap in fstab, then turn it off immediately (kubelet requirement)
  ssh $host "sed -i 's@.*swap.*@#&@g' /etc/fstab"
  ssh $host "swapoff -a"
  scp /etc/hosts $host:/etc/hosts
done
下载软件包并批量安装
- 下载软件包至/opt/rpm_dir方便后续一起安装
# Download (without installing) every needed rpm into one directory so all
# nodes can later be installed offline in a single localinstall.
dest=/opt/rpm_dir
# common base tools
yumdownloader --resolve --destdir "$dest" wget psmisc vim net-tools nfs-utils telnet yum-utils device-mapper-persistent-data lvm2 git tar curl ipvsadm ipset sysstat conntrack libseccomp chrony
# mainline (ml) kernel from ELRepo
yumdownloader --resolve --destdir "$dest" kernel-ml --enablerepo=elrepo-kernel
# container runtime
yumdownloader --resolve --destdir "$dest" containerd.io --disableexcludes=docker-ce
# kubernetes 1.26.0: kubelet / kubectl / kubeadm
yumdownloader --resolve --destdir "$dest" kubelet-1.26.0-0 kubectl-1.26.0-0 kubeadm-1.26.0-0 --disableexcludes=kubernetes
- 所有节点都安装以上软件包
# Copy the repo files and the downloaded rpms to each node, install them
# there, then clean up the staging directory.
source /opt/k8s_env.sh
for node in ${HOSTS[@]}; do
  scp /etc/yum.repos.d/*.repo $node:/etc/yum.repos.d/
  scp -r /opt/rpm_dir $node:/tmp/
  ssh $node "yum -y localinstall /tmp/rpm_dir/*"
  ssh $node "rm -rf /tmp/rpm_dir/"
  # Uncomment to force an English locale on the nodes:
  #ssh $node "echo 'export LC_ALL=en_US.UTF-8' >> ~/.bashrc"
done
- 升级内核(节点会重启)
# Make the freshly installed kernel-ml the default boot entry on every node,
# then reboot the whole cluster — remote nodes first, this node last, so the
# loops can finish before we go down ourselves.
source /opt/k8s_env.sh
for host in ${HOSTS[@]};do
# show the installed kernel packages for a manual sanity check
ssh $host "rpm -qa|grep kernel"
#ssh $host " awk -F\' '$1=="menuentry " {print $2}' /etc/grub2.cfg"
echo $host
echo ""
# entry 0 is the newest kernel after the kernel-ml install above
ssh $host "grub2-set-default 0"
done
for host in ${HOSTS[@]};do
# skip the local node here; it reboots last via `init 6` below
if [[ $host == $(hostname) ]];then
continue
fi
ssh $host reboot
done
init 6
配置时间同步
计划master1同步阿里ntp服务器,其余节点同步master1
# Generate two chrony configs:
#   /etc/chrony.conf -> master1: syncs to ntp.aliyun.com, serves the node LAN
#   /opt/chrony.conf -> other nodes: sync to master1 (distributed next step)
# Fix: the heredocs expand $NODEIPS and $master1 at generation time, so the
# env file MUST be sourced first — otherwise the generated configs contain
# an empty `allow` network and an empty upstream server.
source /opt/k8s_env.sh
cat > /etc/chrony.conf <<EOF
server ntp.aliyun.com iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
allow $NODEIPS
local stratum 10
keyfile /etc/chrony.keys
leapsectz right/UTC
logdir /var/log/chrony
EOF
cat > /opt/chrony.conf <<EOF
server $master1 iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
local stratum 10
keyfile /etc/chrony.keys
leapsectz right/UTC
logdir /var/log/chrony
EOF
分发chrony配置文件
# Set the timezone everywhere, install the client chrony config on every
# node except this one (master1 keeps the server config already written to
# /etc/chrony.conf), restart chronyd, then show sync status.
source /opt/k8s_env.sh
self=$(hostname)
for host in ${HOSTS[@]}; do
  ssh $host "ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime"
  if [[ $host == $self ]]; then
    ssh $host "systemctl restart chronyd"
    continue
  fi
  scp /opt/chrony.conf $host:/etc/chrony.conf
  ssh $host " systemctl restart chronyd"
done
# give chronyd a moment to pick its sources before checking
sleep 3
for host in ${HOSTS[@]}; do
  ssh $host "timedatectl"
  ssh $host "chronyc sources -v"
done
配置打开文件描述符
# Raise per-user open-file / process / memlock limits on every node.
# Fixes from review:
#  - "seft" typo -> "soft": pam_limits silently ignores lines with an
#    unknown limit type, so the memlock soft limit was never applied.
#  - hard nofile was 131072, i.e. LOWER than the soft 655360; the soft
#    limit must not exceed the hard one, so raise hard to 655360 too.
cat <<EOF > /etc/security/limits.conf
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF
source /opt/k8s_env.sh
for host in "${HOSTS[@]}"; do
  scp /etc/security/limits.conf $host:/etc/security/limits.conf
done
添加ipvs模块和内核模块
- 添加ipvs模块
# Kernel modules required by kube-proxy (IPVS mode), iptables/ipset based
# CNI plumbing, and the overlay/bridge stack; loaded at boot by
# systemd-modules-load.
# Fixes from review:
#  - ip_vs_sh was listed twice.
#  - modules-load.d does NOT support trailing comments: the line
#    "nf_conntrack #..." is read as one module name and fails to load,
#    so the comment is moved onto its own line.
cat <<EOF > /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
# kernels older than 4.18: replace the next line with nf_conntrack_ipv4
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
overlay
br_netfilter
EOF
- 添加内核模块
# sysctl settings required by kubernetes (IP forwarding, bridged traffic
# routed through iptables) plus common conntrack/TCP tuning.
# Fixes from review:
#  - net.ipv4.tcp_max_syn_backlog was set twice with the same value.
#  - legacy key net.ipv4.ip_conntrack_max dropped: it errors on modern
#    kernels and net.netfilter.nf_conntrack_max above already covers it.
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
vm.swappiness=0
EOF
- 进行配置文件分发
# Ship the module list and sysctl config to every node in WORKS, reload
# the modules, and apply the sysctls immediately.
source /opt/k8s_env.sh
for node in ${WORKS[@]}; do
  scp /etc/modules-load.d/ipvs.conf $node:/etc/modules-load.d/ipvs.conf
  scp /etc/sysctl.d/k8s.conf $node:/etc/sysctl.d/k8s.conf
  ssh $node "systemctl restart systemd-modules-load.service"
  ssh $node "sysctl --system"
done
Ubuntu
配置apt源
- 基础apt源
mv /etc/apt/{sources.list,sources.list.bak} # 备份现有的apt源 cat <<EOF > /etc/apt/sources.list deb http://repo.huaweicloud.com/ubuntu focal main restricted deb http://repo.huaweicloud.com/ubuntu focal-updates main restricted deb http://repo.huaweicloud.com/ubuntu focal universe deb http://repo.huaweicloud.com/ubuntu focal-updates universe deb http://repo.huaweicloud.com/ubuntu focal multiverse deb http://repo.huaweicloud.com/ubuntu focal-updates multiverse deb http://repo.huaweicloud.com/ubuntu focal-backports main restricted universe multiverse deb http://repo.huaweicloud.com/ubuntu focal-security main restricted deb http://repo.huaweicloud.com/ubuntu focal-security universe deb http://repo.huaweicloud.com/ubuntu focal-security multiverse EOF sudo apt-get update
- 添加docker源(用于安装containerd)
# Docker CE apt repo (Huawei mirror), used only for containerd.io:
# trust the signing key first, then register the repo for this release.
mirror=https://repo.huaweicloud.com/docker-ce/linux/ubuntu
curl -fsSL $mirror/gpg | sudo apt-key add -
sudo add-apt-repository "deb [arch=amd64] $mirror $(lsb_release -cs) stable"
- 添加k8s源(用于安装kubeadm、kubelet、kubectl)
# Kubernetes apt repo (Huawei mirror) for kubeadm/kubelet/kubectl.
# NOTE(review): "kubernetes-xenial" is the channel name upstream uses for
# all recent releases, not an Ubuntu version constraint.
cat <<EOF > /etc/apt/sources.list.d/kubernetes.list
deb https://repo.huaweicloud.com/kubernetes/apt/ kubernetes-xenial main
EOF
curl -s https://repo.huaweicloud.com/kubernetes/apt/doc/apt-key.gpg | sudo apt-key add -
配置免密、修改hostname、关闭防火墙、关闭swap分区(方便后面进行其它操作)
- 修改hosts文件
# Append an "<ip> <hostname>" entry for every cluster node to /etc/hosts.
# Edit /opt/k8s_env.sh first — this reads the per-host IP variables from it.
source /opt/k8s_env.sh
for host in "${HOSTS[@]}"; do
  # ${!host} is bash indirect expansion: the value of the variable named by
  # $host (host=master1 -> $master1). Replaces the fragile eval-based lookup
  # `$(eval echo "$"$host)`, which re-parses the value through the shell.
  echo "${!host} $host" >> /etc/hosts
done
- 修改hostname、关闭防火墙、关闭swap分区
# Push SSH keys to every node, then per-node base setup: hostname, ufw off,
# swap disabled, /etc/hosts synced (Ubuntu variant — no SELinux step).
# Fix: quote "${HOSTS[@]}" so entries are never word-split or globbed.
source /opt/k8s_env.sh
apt -y install sshpass
ssh-keygen -t rsa -b 2048 -P "" -f /root/.ssh/id_rsa -q
for host in "${HOSTS[@]}"; do
  # sshpass -e reads the root password from $SSHPASS (exported by k8s_env.sh)
  #sshpass -p 1 ssh-copy-id -o StrictHostKeyChecking=no $host
  sshpass -e ssh-copy-id -o StrictHostKeyChecking=no $host
  ssh $host "hostnamectl set-hostname $host"
  ssh $host "systemctl disable --now ufw"
  # comment out swap in fstab, then turn it off immediately (kubelet requirement)
  ssh $host "sed -i 's@.*swap.*@#&@g' /etc/fstab"
  ssh $host "swapoff -a"
  scp /etc/hosts $host:/etc/hosts
done
下载软件包并批量安装
- 本来想下载软件包然后一起dpkg安装的,但是总是少安装包,就还是直接安装了
# Install everything directly over ssh on each node (offline dpkg staging
# kept missing dependencies, so plain apt installs are used instead).
source /opt/k8s_env.sh
for node in ${HOSTS[@]}; do
  # base tools, then the pinned 1.26.0 kubernetes packages, then the runtime
  ssh $node "apt -y install net-tools ipvsadm ipset conntrack chrony "
  ssh $node "apt -y install kubelet=1.26.0-00 kubeadm=1.26.0-00 kubectl=1.26.0-00"
  ssh $node "apt -y install containerd.io"
done
配置时间同步
计划master1同步阿里ntp服务器,其余节点同步master1
# Generate two chrony configs (Ubuntu paths):
#   /etc/chrony/chrony.conf -> master1: syncs to ntp.aliyun.com, serves LAN
#   /opt/chrony.conf        -> other nodes: sync to master1
# Fix: the heredocs expand $NODEIPS and $master1 at generation time, so the
# env file MUST be sourced first — otherwise the generated configs contain
# an empty `allow` network and an empty upstream server.
source /opt/k8s_env.sh
cat > /etc/chrony/chrony.conf <<EOF
server ntp.aliyun.com iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
allow $NODEIPS
local stratum 10
keyfile /etc/chrony/chrony.keys
leapsectz right/UTC
logdir /var/log/chrony
EOF
cat > /opt/chrony.conf <<EOF
server $master1 iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
local stratum 10
keyfile /etc/chrony/chrony.keys
leapsectz right/UTC
logdir /var/log/chrony
EOF
分发chrony配置文件
# Set the timezone everywhere, install the client chrony config on every
# node except this one (master1 keeps the server config already written to
# /etc/chrony/chrony.conf), restart the service, enable it, show status.
# Ubuntu's chrony service unit is "chrony", not "chronyd".
source /opt/k8s_env.sh
self=$(hostname)
for host in ${HOSTS[@]}; do
  ssh $host "ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime"
  if [[ $host == $self ]]; then
    ssh $host "systemctl restart chrony"
    continue
  fi
  scp /opt/chrony.conf $host:/etc/chrony/chrony.conf
  ssh $host " systemctl restart chrony"
done
# give chronyd a moment to pick its sources before checking
sleep 3
for host in ${HOSTS[@]}; do
  ssh $host "systemctl enable chrony"
  ssh $host "timedatectl"
  ssh $host "chronyc sources -v"
done
配置打开文件描述符
# Raise per-user open-file / process / memlock limits on every node.
# Fixes from review:
#  - "seft" typo -> "soft": pam_limits silently ignores lines with an
#    unknown limit type, so the memlock soft limit was never applied.
#  - hard nofile was 131072, i.e. LOWER than the soft 655360; the soft
#    limit must not exceed the hard one, so raise hard to 655360 too.
cat <<EOF > /etc/security/limits.conf
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF
source /opt/k8s_env.sh
for host in "${HOSTS[@]}"; do
  scp /etc/security/limits.conf $host:/etc/security/limits.conf
done
添加ipvs模块和内核模块
- 添加ipvs模块
# Kernel modules required by kube-proxy (IPVS mode), iptables/ipset based
# CNI plumbing, and the overlay/bridge stack; loaded at boot by
# systemd-modules-load.
# Fixes from review:
#  - ip_vs_sh was listed twice.
#  - modules-load.d does NOT support trailing comments: the line
#    "nf_conntrack #..." is read as one module name and fails to load,
#    so the comment is moved onto its own line.
cat <<EOF > /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
# kernels older than 4.18: replace the next line with nf_conntrack_ipv4
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
overlay
br_netfilter
EOF
- 添加内核模块
# sysctl settings required by kubernetes (IP forwarding, bridged traffic
# routed through iptables) plus common conntrack/TCP tuning.
# Fixes from review:
#  - net.ipv4.tcp_max_syn_backlog was set twice with the same value.
#  - legacy key net.ipv4.ip_conntrack_max dropped: it errors on modern
#    kernels and net.netfilter.nf_conntrack_max above already covers it.
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
vm.swappiness=0
EOF
- 进行配置文件分发
# Ship the module list and sysctl config to every node in WORKS, reload
# the modules, and apply the sysctls immediately.
source /opt/k8s_env.sh
for node in ${WORKS[@]}; do
  scp /etc/modules-load.d/ipvs.conf $node:/etc/modules-load.d/ipvs.conf
  scp /etc/sysctl.d/k8s.conf $node:/etc/sysctl.d/k8s.conf
  ssh $node "systemctl restart systemd-modules-load.service"
  ssh $node "sysctl --system"
done