I. Preparing the base environment for the K8S cluster
1. Configure host name resolution
cat >> /etc/hosts <<EOF
10.0.0.66 k8s66
10.0.0.77 k8s77
10.0.0.88 k8s88
10.0.0.99 api-server
EOF
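Optional quick check that name resolution works on each node; getent reads /etc/hosts directly:
for h in k8s66 k8s77 k8s88 api-server; do getent hosts $h; done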
2. Disable unnecessary services
2.1 Disable the firewall, NetworkManager, and postfix
systemctl disable --now firewalld NetworkManager postfix
2.2 Disable SELinux
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config
grep ^SELINUX= /etc/selinux/config
2.3 Disable the swap partition
swapoff -a && sysctl -w vm.swappiness=0
sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab
grep swap /etc/fstab
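Optionally verify that swap is fully disabled; free should report 0 for swap after the commands above:
free -h | grep -i swap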
3. Basic Linux tuning
3.1 Tune the sshd service
sed -ri 's@^#UseDNS yes@UseDNS no@g' /etc/ssh/sshd_config
sed -ri 's#^GSSAPIAuthentication yes#GSSAPIAuthentication no#g' /etc/ssh/sshd_config
grep ^UseDNS /etc/ssh/sshd_config
grep ^GSSAPIAuthentication /etc/ssh/sshd_config
3.2 Raise the limit on the number of open files (takes effect after logging out of the current session and back in)
cat > /etc/security/limits.d/k8s.conf <<'EOF'
* soft nofile 65535
* hard nofile 131070
EOF
ulimit -Sn
ulimit -Hn
3.3 Customize the terminal prompt colors
cat <<EOF >> ~/.bashrc
PS1='[\[\e[34;1m\]\u@\[\e[0m\]\[\e[32;1m\]\H\[\e[0m\]\[\e[31;1m\] \W\[\e[0m\]]# '
EOF
source ~/.bashrc
3.4 Configure automatic kernel module loading on all nodes (if you skip this step, kubeadm init will fail outright!)
modprobe br_netfilter
modprobe ip_conntrack
cat >>/etc/rc.sysinit<<'EOF'
#!/bin/bash
for file in /etc/sysconfig/modules/*.modules ; do
[ -x $file ] && $file
done
EOF
echo "modprobe br_netfilter" >/etc/sysconfig/modules/br_netfilter.modules
echo "modprobe ip_conntrack" >/etc/sysconfig/modules/ip_conntrack.modules
chmod 755 /etc/sysconfig/modules/br_netfilter.modules
chmod 755 /etc/sysconfig/modules/ip_conntrack.modules
lsmod | grep br_netfilter
3.5 Use the chronyd daemon for cluster time synchronization
3.5.1 Manually set the time zone
\cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
3.5.2 Install the chrony service
yum -y install ntpdate chrony
3.5.3 Edit the configuration file
vim /etc/chrony.conf
...
server ntp.aliyun.com iburst
server ntp1.aliyun.com iburst
server ntp2.aliyun.com iburst
server ntp3.aliyun.com iburst
server ntp4.aliyun.com iburst
server ntp5.aliyun.com iburst
3.5.4 Start the service
systemctl enable --now chronyd
3.5.5 Check the service status
systemctl status chronyd
chronyc activity -v
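Optionally confirm that chrony can reach the configured upstream servers and that the clock is synchronized:
chronyc sources -v
timedatectl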
4. Configure package repositories and install common cluster software
4.1 Configure the Aliyun mirrors
curl -s -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
curl -s -o /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
4.2 Configure the K8S package repository
cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
EOF
Reference:
https://developer.aliyun.com/mirror/kubernetes
4.3 Install common cluster software
yum -y install expect wget jq psmisc vim net-tools telnet yum-utils device-mapper-persistent-data lvm2 git ntpdate chrony bind-utils rsync unzip
4.4 Download the configuration files and scripts
git clone https://gitee.com/jasonyin2020/oldboyedu-linux-Cloud_Native
5. Configure passwordless SSH from k8s66 to the cluster nodes and set up a sync script
The k8s66 node logs in to the cluster nodes without a password; the configuration files and certificates generated during installation are all produced on k8s66, and cluster management is also performed from k8s66.
On Alibaba Cloud or AWS a dedicated kubectl server is needed. The key configuration is as follows:
5.1 Configure batch passwordless login
cat > password_free_login.sh <<'EOF'
#!/bin/bash
# Author: Jason Yin
# Generate the SSH key pair
ssh-keygen -t rsa -P "" -f /root/.ssh/id_rsa -q
# Declare the server password; all nodes should use the same password, otherwise this script needs further tweaking
export mypasswd=yinzhengjie
# Define the host list
k8s_host_list=(k8s66 k8s77 k8s88)
# Configure passwordless login, using expect to answer the prompts non-interactively
for i in ${k8s_host_list[@]};do
expect -c "
spawn ssh-copy-id -i /root/.ssh/id_rsa.pub root@$i
expect {
\"*yes/no*\" {send \"yes\r\"; exp_continue}
\"*password*\" {send \"$mypasswd\r\"; exp_continue}
}"
done
EOF
sh password_free_login.sh
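A quick sanity check that the key distribution worked; each command should print the remote hostname without asking for a password:
for host in k8s66 k8s77 k8s88; do ssh root@$host hostname; done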
5.2 Write the sync script
cat > /usr/local/sbin/data_rsync.sh <<'EOF'
#!/bin/bash
# Author: Jason Yin
if [ $# -ne 1 ];then
echo "Usage: $0 /path/to/file(绝对路径)"
exit
fi
if [ ! -e $1 ];then
echo "[ $1 ] dir or file not find!"
exit
fi
fullpath=`dirname $1`
basename=`basename $1`
cd $fullpath
k8s_host_list=(k8s66 k8s77 k8s88)
for host in ${k8s_host_list[@]};do
tput setaf 2
echo ===== rsyncing ${host}: $basename =====
tput setaf 7
rsync -az $basename `whoami`@${host}:$fullpath
if [ $? -eq 0 ];then
echo "命令执行成功!"
fi
done
EOF
chmod +x /usr/local/sbin/data_rsync.sh
5.3 Sync data to the other nodes
[root@k8s66 ~]# data_rsync.sh /root/oldboyedu-linux-Cloud_Native/
6. Upgrade the kernel on all nodes
6.1 CentOS 7 needs the kernel upgraded to 4.18+
cd /root/oldboyedu-linux-Cloud_Native/kernel/4.19.12 && yum -y localinstall kernel-ml*
6.2 Change the kernel boot order
grub2-set-default 0 && grub2-mkconfig -o /etc/grub2.cfg
6.3 Check the default kernel version
grubby --default-kernel
6.4 Reboot the operating system
reboot
6.5 Check the kernel version currently in use
uname -r
7. Install ipvsadm on all nodes so kube-proxy can do IPVS load balancing
7.1 Install ipvsadm and related tools
yum -y install ipvsadm ipset sysstat conntrack libseccomp
7.2 Create the module configuration file to be loaded automatically at boot
cat > /etc/modules-load.d/ipvs.conf << 'EOF'
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF
7.3 将"systemd-modules-load"服务设置为开机自启动
systemctl enable --now systemd-modules-load && systemctl status systemd-modules-load
7.4 Check that the modules are loaded
lsmod | grep --color=auto -e ip_vs -e nf_conntrack
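The ip_vs virtual-server rules themselves only appear later, once kube-proxy is running in ipvs mode; at that point you can list them with, for example:
ipvsadm -Ln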
8. Tune Linux kernel parameters on all nodes
8.1 Write the kernel parameter configuration on all nodes
cat > /etc/sysctl.d/k8s.conf <<'EOF'
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
sysctl --system
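Optionally spot-check a few of the values; note that the bridge parameters only resolve when the br_netfilter module is loaded (configured in step 3.4):
sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables net.core.somaxconn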
8.2 Reboot the virtual machines
reboot
8.3 Take snapshots
If all nodes reboot normally, our configuration is correct! The next step is to take snapshots.
II. Deploying containerd separately
1. Kubernetes has deprecated Docker as its container runtime and moved to containerd
Recommended reading:
https://i4t.com/5435.html
2. Deploy the containerd service on all nodes
2.1 Upgrade libseccomp
On CentOS 7 the libseccomp version available from yum is 2.3, which does not satisfy the requirements of recent containerd releases.
Therefore, before installing containerd we first upgrade libseccomp to 2.4 or later; here we deploy version 2.5.1.
2.2 Remove the old libseccomp
rpm -e libseccomp-2.3.1-4.el7.x86_64 --nodeps
2.3 Download the libseccomp-2.5.1 package
wget http://rpmfind.net/linux/centos/8-stream/BaseOS/x86_64/os/Packages/libseccomp-2.5.1-1.el8.x86_64.rpm
2.4 Install the libseccomp-2.5.1 package
rpm -ivh libseccomp-2.5.1-1.el8.x86_64.rpm
# or, equivalently, install the copy shipped in the cloned repo:
rpm -ivh /root/oldboyedu-linux-Cloud_Native/containerd/libseccomp-2.5.1-1.el8.x86_64.rpm
2.5 Check the installed version to confirm the installation succeeded
rpm -qa | grep libseccomp
3. Install the containerd components
3.1 Download the containerd bundle
wget https://github.com/containerd/containerd/releases/download/v1.6.4/cri-containerd-cni-1.6.4-linux-amd64.tar.gz
3.2 Extract the package (here we simply let it overwrite the corresponding directories)
tar zxvf cri-containerd-cni-1.6.4-linux-amd64.tar.gz -C /
4. Configure containerd
4.1 Create the configuration directory
mkdir -pv /etc/containerd
4.2 Generate the default configuration file
containerd config default > /etc/containerd/config.toml
5. Replace the default pause image address
sed -i 's/k8s.gcr.io/registry.cn-beijing.aliyuncs.com\/abcdocker/' /etc/containerd/config.toml
grep sandbox_image /etc/containerd/config.toml
6. Configure systemd as the container cgroup driver
sed -i 's/SystemdCgroup \= false/SystemdCgroup \= true/' /etc/containerd/config.toml
grep SystemdCgroup /etc/containerd/config.toml
7. Enable containerd at boot
7.1 Start the containerd service and enable it at boot
systemctl enable --now containerd
7.2 Check the containerd status
systemctl status containerd
7.3 Check the containerd version
ctr version
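Optionally verify that the CRI endpoint answers as well. The cri-containerd-cni bundle should also ship a crictl binary under /usr/local/bin, so a minimal check (assuming that binary is present) might look like this:
crictl --runtime-endpoint unix:///run/containerd/containerd.sock info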
III. Load balancer configuration
1. Compile and install nginx on the k8s66 node; the upstream/stream modules are needed later
1.1 Create the user that runs nginx on all master nodes
useradd nginx -s /sbin/nologin -M
1.2 Install dependencies
yum -y install pcre pcre-devel openssl openssl-devel gcc gcc-c++ automake autoconf libtool make
1.3 Download the nginx source package
wget http://nginx.org/download/nginx-1.21.6.tar.gz
1.4 Extract the package
tar xf nginx-1.21.6.tar.gz
# tar xf oldboyedu-linux-Cloud_Native/nginx+keepalived/nginx-1.21.6.tar.gz
1.5 Configure nginx
cd nginx-1.21.6
./configure --prefix=/usr/local/nginx/ \
--with-pcre \
--with-http_ssl_module \
--with-http_stub_status_module \
--with-stream \
--with-http_gzip_static_module
1.6 Compile and install nginx
make -j 4 && make install
1.7 Manage nginx with systemd and enable it at boot
cat >/usr/lib/systemd/system/nginx.service <<'EOF'
[Unit]
Description=The nginx HTTP and reverse proxy server
After=network.target sshd-keygen.service
[Service]
Type=forking
ExecStartPre=/usr/local/nginx/sbin/nginx -t -c /usr/local/nginx/conf/nginx.conf
ExecStart=/usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/nginx.conf
ExecReload=/usr/local/nginx/sbin/nginx -s reload
ExecStop=/usr/local/nginx/sbin/nginx -s stop
Restart=on-failure
RestartSec=42s
[Install]
WantedBy=multi-user.target
EOF
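systemd only picks up a newly created unit file after reloading its unit definitions; you can also start nginx on k8s66 now so the check in 1.8 shows a running service:
systemctl daemon-reload
systemctl enable --now nginx
If you start it here, remember to restart nginx after replacing the configuration in step 2.1 so the stream proxy settings take effect.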
1.8 Check whether the nginx service is running
systemctl status nginx
ps -ef|grep nginx
1.9 Sync the nginx installation and unit file to the other master nodes
data_rsync.sh /usr/local/nginx/
data_rsync.sh /usr/lib/systemd/system/nginx.service
2. Configure nginx on the k8s66 node
2.1 Edit the nginx configuration file
cat > /usr/local/nginx/conf/nginx.conf <<'EOF'
user nginx nginx;
worker_processes auto;
events {
worker_connections 20240;
use epoll;
}
error_log /var/log/nginx_error.log info;
stream {
upstream kube-servers {
hash $remote_addr consistent;
server k8s66:6443 weight=5 max_fails=1 fail_timeout=3s;
server k8s77:6443 weight=5 max_fails=1 fail_timeout=3s;
server k8s88:6443 weight=5 max_fails=1 fail_timeout=3s;
}
server {
listen 8443 reuseport;
proxy_connect_timeout 3s;
proxy_timeout 3000s;
proxy_pass kube-servers;
}
}
EOF
2.2 Sync the nginx configuration file to the other master nodes
data_rsync.sh /usr/local/nginx/conf/nginx.conf
2.3 Start the nginx service on all nodes
systemctl enable --now nginx
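A quick check on each node that the stream proxy is listening on 8443 (end-to-end connections will only succeed once the kube-apiservers are up on 6443):
ss -lntp | grep 8443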
3. Deploy keepalived
3.1 Install the keepalived package
yum -y install keepalived
3.2 Modify the keepalived configuration file (depending on your environment, interface eth0 may need to be changed to interface ens33)
3.2.1 Write the configuration file; each master node only needs to change the router_id and mcast_src_ip values.
3.2.1.1 The k8s66 node
cat > /etc/keepalived/keepalived.conf <<EOF
! Configuration File for keepalived
global_defs {
router_id 10.0.0.66
}
vrrp_script chk_nginx {
script "/etc/keepalived/check_port.sh 8443"
interval 2
weight -20
}
vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 100
priority 100
advert_int 1
mcast_src_ip 10.0.0.66
nopreempt
authentication {
auth_type PASS
auth_pass 11111111
}
track_script {
chk_nginx
}
virtual_ipaddress {
10.0.0.99
}
}
EOF
3.2.1.2 The k8s77 node
cat > /etc/keepalived/keepalived.conf <<EOF
! Configuration File for keepalived
global_defs {
router_id 10.0.0.77
}
vrrp_script chk_nginx {
script "/etc/keepalived/check_port.sh 8443"
interval 2
weight -20
}
vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 100
priority 100
advert_int 1
mcast_src_ip 10.0.0.77
nopreempt
authentication {
auth_type PASS
auth_pass 11111111
}
track_script {
chk_nginx
}
virtual_ipaddress {
10.0.0.99
}
}
EOF
3.2.1.3 The k8s88 node
cat > /etc/keepalived/keepalived.conf <<EOF
! Configuration File for keepalived
global_defs {
router_id 10.0.0.88
}
vrrp_script chk_nginx {
script "/etc/keepalived/check_port.sh 8443"
interval 2
weight -20
}
vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 100
priority 100
advert_int 1
mcast_src_ip 10.0.0.88
nopreempt
authentication {
auth_type PASS
auth_pass 11111111
}
track_script {
chk_nginx
}
virtual_ipaddress {
10.0.0.99
}
}
EOF
3.2.2 Write the health check script on each node
cat > /etc/keepalived/check_port.sh <<'EOF'
#!/bin/bash
CHK_PORT=$1
if [ -n "$CHK_PORT" ];then
    PORT_PROCESS=`ss -lt|grep $CHK_PORT|wc -l`
    if [ $PORT_PROCESS -eq 0 ];then
        echo "Port $CHK_PORT Is Not Used,End."
        exit 1
    fi
else
    echo "Check Port Cant Be Empty!"
fi
EOF
chmod +x /etc/keepalived/check_port.sh
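You can exercise the script by hand; it should return 0 while something is listening on 8443 and 1 otherwise:
bash /etc/keepalived/check_port.sh 8443; echo $?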
3.3 Start keepalived
systemctl enable --now keepalived
3.4 Test keepalived
ip a                       # check which node currently holds the VIP
systemctl stop keepalived  # stop the service and watch whether the VIP fails over to another node
3.5 Parameter descriptions
Notes:
router_id:
    The node's IP; each master node is configured with its own IP.
mcast_src_ip:
    The node's IP; each master node is configured with its own IP.
virtual_ipaddress:
    The virtual IP, i.e. the VIP.
interface:
    The name of the network interface to bind to.
virtual_router_id:
    Valid values are 1-255; it can be understood as a group ID, and only instances with the same ID are treated as one group.
    If the keepalived instances are configured with different IDs, each of them will end up holding its own VIP.
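If every node ends up holding the VIP at the same time, VRRP traffic is usually being filtered between the nodes. A quick way to watch for the advertisements (112 is the VRRP IP protocol number; the interface name eth0 is an assumption, adjust it to your environment):
tcpdump -i eth0 -nn 'ip proto 112' -c 5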
IV. Initializing the K8S cluster with kubeadm
1. Install kubeadm
1.1 Install kubeadm, kubelet and kubectl on all master nodes
yum install -y kubelet-1.28.1 kubeadm-1.28.1 kubectl-1.28.1
1.2 Enable kubelet at boot on all nodes
systemctl enable --now kubelet
systemctl status kubelet
1.3 Check the kubectl version
kubectl version --client --output=yaml
2. Prepare the kubeadm configuration file
2.1 On the k8s66 node, print the default init configuration
kubeadm config print init-defaults > kubeadm-init.yaml
2.2 Customize the configuration based on the default format
[root@k8s66 oldboyedu-linux-Cloud_Native]# cat kubeadm-init.yaml
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: oldboy.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  # IP address of k8s66
  advertiseAddress: 10.0.0.66
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  name: k8s66
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
# imageRepository: k8s.gcr.io
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.28.0
controlPlaneEndpoint: api-server:8443
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.200.0.0/16
  podSubnet: 10.100.0.0/16
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
# kube-proxy mode
mode: ipvs
---
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 0s
    cacheUnauthorizedTTL: 0s
clusterDNS:
- 10.200.0.254
# clusterDomain: cluster.local
clusterDomain: oldboyedu.com
cpuManagerReconcilePeriod: 0s
evictionPressureTransitionPeriod: 0s
fileCheckFrequency: 0s
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 0s
imageMinimumGCAge: 0s
kind: KubeletConfiguration
# configure the cgroup driver
cgroupDriver: systemd
logging: {}
memorySwap: {}
nodeStatusReportFrequency: 0s
nodeStatusUpdateFrequency: 0s
rotateCertificates: true
runtimeRequestTimeout: 0s
shutdownGracePeriod: 0s
shutdownGracePeriodCriticalPods: 0s
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 0s
syncFrequency: 0s
volumeStatsAggPeriod: 0s
[root@k8s66 oldboyedu-linux-Cloud_Native]#
2.3 Check the configuration file for errors
kubeadm init --config kubeadm-init.yaml --dry-run
Reference:
https://kubernetes.io/docs/reference/config-api/kubeadm-config.v1beta3/
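Optionally, list and pre-pull the images referenced by the configuration before running init; this keeps the actual initialization step short:
kubeadm config images list --config kubeadm-init.yaml
kubeadm config images pull --config kubeadm-init.yaml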
3. Initialize the cluster based on the kubeadm configuration file
[root@k8s66 oldboyedu-linux-Cloud_Native]# kubeadm init --config kubeadm-init.yaml --upload-certs
...
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:
kubeadm join api-server:8443 --token oldboy.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:f1c52a63da2c3ed3b494f05b0cd2a19301ac6c81cdb62cc99668f3c991080a61 \
--control-plane
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join api-server:8443 --token oldboy.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:f1c52a63da2c3ed3b494f05b0cd2a19301ac6c81cdb62cc99668f3c991080a61
[root@k8s66 oldboyedu-linux-Cloud_Native]#
Note:
If you jumped ahead and ran init without the "--upload-certs" flag, the certificates will have to be uploaded manually later.
That is easy; just follow section V.1.
4. Copy the kubeconfig for kubectl
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
5. View the configmap resource
kubectl -n kube-system get cm kubeadm-config -o yaml
6. View the node resources
[root@k8s66 oldboyedu-linux-Cloud_Native]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s66 Ready control-plane 8m1s v1.28.1
[root@k8s66 oldboyedu-linux-Cloud_Native]#
V. Joining additional masters and workers to the cluster
1. Upload the control-plane certificate files to the K8S cluster as a Secret resource.
[root@k8s66 ~]# kubeadm init phase upload-certs --upload-certs
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
58aea750d020038fc8e682af195803d4c8f842c7eaaa49ef77fa354d1fc77db3
[root@k8s66 ~]#
Note that the value "58aea750d020038fc8e682af195803d4c8f842c7eaaa49ef77fa354d1fc77db3" obtained here is important.
2. The remaining master nodes join the cluster using the token information above; the certificate key must be passed with the "--certificate-key" option. (Replace the values with the ones from your own environment!)
kubeadm join api-server:8443 --token oldboy.0123456789abcdef --discovery-token-ca-cert-hash sha256:f1c52a63da2c3ed3b494f05b0cd2a19301ac6c81cdb62cc99668f3c991080a61 --control-plane --certificate-key 58aea750d020038fc8e682af195803d4c8f842c7eaaa49ef77fa354d1fc77db3
3. Configure the kubeconfig authentication file on all master nodes
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
4. View all nodes.
[root@k8s66 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s66 Ready control-plane 22m v1.28.1
k8s77 Ready control-plane 6m13s v1.28.1
k8s88 Ready control-plane 18s v1.28.1
[root@k8s66 ~]#
[root@k8s77 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s66 Ready control-plane 22m v1.28.1
k8s77 Ready control-plane 6m7s v1.28.1
k8s88 Ready control-plane 22s v1.28.1
[root@k8s77 ~]#
[root@k8s88 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s66 Ready control-plane 22m v1.28.1
k8s77 Ready control-plane 6m29s v1.28.1
k8s88 Ready control-plane 44s v1.28.1
[root@k8s88 ~]#
5. If there are additional worker nodes, run the following command on them to join the cluster
kubeadm join api-server:8443 --token oldboy.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:f1c52a63da2c3ed3b494f05b0cd2a19301ac6c81cdb62cc99668f3c991080a61
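The bootstrap token in this walkthrough has a 24h TTL; if it has expired by the time a worker joins, a fresh join command can be generated on any control-plane node:
kubeadm token create --print-join-command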
VI. Deploying the network plugin
1. Download the network plugin manifest
wget https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
2. Modify the network plugin manifest (the Network field must match the podSubnet, 10.100.0.0/16)
[root@k8s66 flannel]# vim kube-flannel.yml
apiVersion: v1
data:
  ...
  net-conf.json: |
    {
      "Network": "10.100.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
3. Apply the flannel configuration file
[root@k8s66 flannel]# kubectl apply -f kube-flannel.yml
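To verify the plugin, check that the flannel pods come up and that all nodes report Ready once the CNI is in place:
kubectl get pods -A -o wide | grep flannel
kubectl get nodes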
4. Configure shell auto-completion
yum -y install bash-completion
kubectl completion bash > ~/.kube/completion.bash.inc
echo "source '$HOME/.kube/completion.bash.inc'" >> $HOME/.bash_profile
source $HOME/.bash_profile
5. Deploy a test Deployment
[root@k8s66 ~]# cat 01-deploy-matchLabels-nginx.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      tolerations:
      - key: node-role.kubernetes.io/control-plane
        effect: NoSchedule
      containers:
      - name: nginx
        image: nginx:1.20.1-alpine
        ports:
        - containerPort: 80
[root@k8s66 ~]#
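To run the test, apply the manifest and confirm that the replicas are scheduled, including onto the control-plane nodes thanks to the toleration:
kubectl apply -f 01-deploy-matchLabels-nginx.yaml
kubectl get deploy,pods -o wide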