1. Deployment Walkthrough
1.1 Architecture
1.2 Initialize the environment (all nodes)
#Run on all nodes: stop the firewall, disable SELinux, turn off swap, add local hostname resolution, tune kernel parameters, and enable time synchronization
systemctl stop firewalld
systemctl disable firewalld
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
setenforce 0
sed -i 's/enforcing/disabled/' /etc/selinux/config
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab

cat >> /etc/hosts << EOF
192.168.13.10 master01
192.168.13.20 node01
192.168.13.30 node02
192.168.13.40 master02
192.168.13.50 nginx01
192.168.13.60 nginx02
EOF

cat > /etc/sysctl.d/k8s.conf << EOF
#Enable bridge mode so bridged traffic is passed to the iptables chains
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
#Disable the IPv6 protocol
net.ipv6.conf.all.disable_ipv6=1
net.ipv4.ip_forward=1
EOF
sysctl --system

yum install ntpdate -y
ntpdate time.windows.com
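Before moving on, it can help to verify on each node that the settings above actually took effect. The quick check below is an optional sketch, not part of the original scripts, and it only reads state. Note that the net.bridge.* keys only exist once the br_netfilter module is loaded; if sysctl reports an unknown key, run modprobe br_netfilter and re-run sysctl --system.

#Optional sanity check (sketch): confirm swap, SELinux, and the kernel parameters
free -h | grep -i swap        #swap should show 0 total and 0 used
getenforce                    #should print Permissive or Disabled
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward
tail -n 6 /etc/hosts          #the six host entries added above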
1.3 Deploy the etcd cluster (commands)
#Run on the master01 node
hostnamectl set-hostname master01
su

#Prepare the cfssl certificate tools (download from the official site, or download ahead of time and upload them; I download and then upload)
#wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -O /usr/local/bin/cfssl
#wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -O /usr/local/bin/cfssljson
#wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -O /usr/local/bin/cfssl-certinfo
#If you hit an "Unable to establish SSL connection" error, change https to http
cd /usr/local/bin/
#Upload the cfssl binaries to /usr/local/bin/
chmod +x /usr/local/bin/cfssl*

#Generate the CA certificate, the etcd server certificate, and the private keys with the scripts
mkdir -p /opt/etcd/etcd-cert
cd /opt/etcd/etcd-cert/
#Upload the etcd-cert.sh and etcd.sh scripts (adjust the IP addresses inside them)
chmod +x etcd-cert.sh etcd.sh
./etcd-cert.sh
#If the certificates were generated successfully, ls shows the following files:
#ca-config.json  ca-csr.json  ca.pem  server.csr  server-key.pem
#ca.csr  ca-key.pem  etcd-cert.sh  server-csr.json  server.pem

#Prepare the files needed to start the etcd service
mkdir -p /opt/etcd/{cfg,bin,ssl}
cd /opt/etcd/
#Upload etcd-v3.4.9-linux-amd64.tar.gz to /opt/etcd, then start the etcd service
tar xf etcd-v3.4.9-linux-amd64.tar.gz
mv /opt/etcd/etcd-v3.4.9-linux-amd64/etcd* /opt/etcd/bin/
mv /opt/etcd/etcd-cert/*.pem /opt/etcd/ssl/
cd /opt/etcd/etcd-cert/
./etcd.sh etcd01 192.168.13.10 etcd02=https://192.168.13.20:2380,etcd03=https://192.168.13.30:2380
#If your script, unlike mine, ends with "systemctl restart etcd", that command will report an error here because the other members are not up yet; this does not affect the deployment
ps -ef | grep etcd
scp -r /opt/etcd/ root@192.168.13.20:/opt/
scp -r /opt/etcd/ root@192.168.13.30:/opt/
scp /usr/lib/systemd/system/etcd.service root@192.168.13.20:/usr/lib/systemd/system/
scp /usr/lib/systemd/system/etcd.service root@192.168.13.30:/usr/lib/systemd/system/

#On every worker node, edit the configuration file to match that node (node01/etcd02 shown here)
hostnamectl set-hostname node01
su
vim /opt/etcd/cfg/etcd
#[Member]
ETCD_NAME="etcd02"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.13.20:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.13.20:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.13.20:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.13.20:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.13.10:2380,etcd02=https://192.168.13.20:2380,etcd03=https://192.168.13.30:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"

systemctl start etcd
systemctl enable etcd
systemctl status etcd

#Back on the master01 node, restart the etcd service
systemctl restart etcd

#Check the etcd cluster status (either command works; a healthy cluster shows 3 "true" or 3 "started" entries)
ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.13.10:2379,https://192.168.13.20:2379,https://192.168.13.30:2379" endpoint health --write-out=table
ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.13.10:2379,https://192.168.13.20:2379,https://192.168.13.30:2379" --write-out=table member list
#At this point the etcd cluster deployment is complete
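Beyond the health and member-list checks, a quick way to confirm the cluster is actually serving requests is to write a test key on one endpoint and read it back from another with etcdctl. This is an optional sketch using the same certificates and endpoints as above; the key name test-key is arbitrary and is removed afterwards.

#Optional smoke test (sketch): write a key, read it back from another member, then delete it
ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.13.10:2379" put test-key "hello"
ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.13.20:2379" get test-key
ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.13.10:2379" del test-key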
1.3 Deploy the etcd cluster (screenshots)
1.4 Deploy components on the master01 node (commands)
#On the master01 node, generate the certificates
mkdir -p /opt/kubernetes/{bin,cfg,ssl,logs,k8s-cert}
cd /opt/kubernetes/
#Upload k8s-cert.sh, master.zip, and kubernetes-server-linux-amd64.tar.gz to /opt/kubernetes
#The five scripts reference fixed paths; if you change any path, update it everywhere (error messages report the offending line)
unzip master.zip
tar xf kubernetes-server-linux-amd64.tar.gz
chmod +x *.sh

#The k8s-cert directory is used to generate the CA certificate and the component certificates and private keys
mv /opt/kubernetes/k8s-cert.sh /opt/kubernetes/k8s-cert
cd /opt/kubernetes/k8s-cert/
./k8s-cert.sh
#If the certificates were generated successfully, ls *.pem shows the following files:
#admin-key.pem  apiserver-key.pem  ca-key.pem  kube-proxy-key.pem
#admin.pem  apiserver.pem  ca.pem  kube-proxy.pem
cp ca*pem apiserver*pem /opt/kubernetes/ssl/
cd /opt/kubernetes/kubernetes/server/bin
cp kube-apiserver kubectl kube-controller-manager kube-scheduler /opt/kubernetes/bin/
ln -s /opt/kubernetes/bin/* /usr/local/bin/

#Create the bootstrap token authentication file; kube-apiserver loads it at startup, which effectively creates this user inside the cluster so it can then be authorized through RBAC
cd /opt/kubernetes/
vim token.sh
#!/bin/bash
#Read the first 16 bytes from /dev/urandom, print them as hexadecimal, and strip the spaces
BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
#Generate token.csv in the format: token,user name,UID,user group
cat > /opt/kubernetes/cfg/token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

chmod +x token.sh
./token.sh
#Check that token.csv was generated successfully
cat /opt/kubernetes/cfg/token.csv

cd /opt/kubernetes/
#Start the kube-apiserver, kube-scheduler, and kube-controller-manager services
./apiserver.sh 192.168.13.10 https://192.168.13.10:2379,https://192.168.13.20:2379,https://192.168.13.30:2379
./scheduler.sh
./controller-manager.sh
./admin.sh
#You can check with ps whether each process started successfully
#ps aux | grep kube-apiserver
#ps aux | grep kube-scheduler
#ps aux | grep kube-controller-manager

#Authorize kubectl access to the cluster by binding the anonymous user to cluster-admin
kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=system:anonymous
#Check the current component status with kubectl (if everything is up, controller-manager and scheduler report ok)
kubectl get cs
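If kubectl get cs does not look right, the kube-apiserver can also be probed directly over TLS with the admin certificate generated by k8s-cert.sh. The sketch below assumes apiserver.sh exposes the Kubernetes default secure port 6443 and that admin.pem and admin-key.pem are still under /opt/kubernetes/k8s-cert/; adjust the port and paths if your scripts lay things out differently.

#Optional check (sketch, assumes secure port 6443): query the healthz and version endpoints directly
curl --cacert /opt/kubernetes/ssl/ca.pem --cert /opt/kubernetes/k8s-cert/admin.pem --key /opt/kubernetes/k8s-cert/admin-key.pem https://192.168.13.10:6443/healthz    #expected output: ok
curl --cacert /opt/kubernetes/ssl/ca.pem --cert /opt/kubernetes/k8s-cert/admin.pem --key /opt/kubernetes/k8s-cert/admin-key.pem https://192.168.13.10:6443/version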
1.4 Deploy components on the master01 node (screenshots)
1.5 Worker node component deployment (commands)
#On all worker nodes, install the Docker engine (Docker must be installed first, otherwise you will later hit a "No resources found" error)
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install -y docker-ce docker-ce-cli containerd.io
systemctl start docker.service
systemctl enable docker.service

#Create the kubernetes working directory
mkdir -p /opt/kubernetes/{bin,cfg,ssl,logs}
cd /opt/
#Upload node.zip to /opt and unzip it to obtain kubelet.sh and proxy.sh
unzip node.zip
chmod +x kubelet.sh proxy.sh

#On the master01 node, copy kubelet and kube-proxy to the worker nodes
cd /opt/kubernetes/kubernetes/server/bin
scp kubelet kube-proxy root@192.168.13.20:/opt/kubernetes/bin/
scp kubelet kube-proxy root@192.168.13.30:/opt/kubernetes/bin/

mkdir -p /opt/kubernetes/kubeconfig
cd /opt/kubernetes/kubeconfig
#Upload kubeconfig.sh to /opt/kubernetes/kubeconfig and generate the kubeconfig files
chmod +x kubeconfig.sh
./kubeconfig.sh 192.168.13.10 /opt/kubernetes/k8s-cert/
scp bootstrap.kubeconfig kube-proxy.kubeconfig root@192.168.13.20:/opt/kubernetes/cfg/
scp bootstrap.kubeconfig kube-proxy.kubeconfig root@192.168.13.30:/opt/kubernetes/cfg/

#RBAC authorization so the kubelet-bootstrap user is allowed to submit CSR requests
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
#Everything above runs on the master01 node

#On the node01 node, start the kubelet service
cd /opt
/opt/kubelet.sh 192.168.13.20
ps aux | grep kubelet

#On the master01 node, list the CSR requests and note the request name from the NAME column (a helper that approves all pending requests at once is sketched after this block)
kubectl get csr
#Sign the certificate for that request
kubectl certificate approve <NAME from the CSR list>
#A condition of Approved,Issued means the CSR was authorized and the certificate has been issued
kubectl get csr
#Check the nodes; they show NotReady because no network plugin has been deployed yet
kubectl get node
#Everything above runs on the master01 node

#On all worker nodes, load the ip_vs modules
for i in $(ls /usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs | grep -o "^[^.]*")
do
  echo $i
  /sbin/modinfo -F filename $i >/dev/null 2>&1 && /sbin/modprobe $i
done

#Start the kube-proxy service
/opt/proxy.sh 192.168.13.20
ps aux | grep kube-proxy
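Once both node01 and node02 have run kubelet.sh, there is one pending CSR per node. Instead of approving them one by one, the helper below is a small sketch that approves every request still in the Pending state and then re-checks the nodes; it relies only on the standard kubectl get csr output, where the condition is the last column.

#Optional helper (sketch): approve all pending CSRs in one go, then check the nodes
kubectl get csr --no-headers | awk '$NF == "Pending" {print $1}' | xargs -r kubectl certificate approve
kubectl get csr
kubectl get node -o wide    #nodes remain NotReady until a CNI plugin (e.g. flannel) is deployed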
1.5 Worker node component deployment (screenshots)