一、部署流程
1.0 架构概况
| 节点 | 服务 |
| ---- | ---- |
1.1 初始化操作
# Run on ALL nodes: disable the firewall, SELinux and swap, add local hostname
# resolution, tune kernel parameters, and enable time synchronization.
systemctl disable --now firewalld
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
setenforce 0
sed -i 's/enforcing/disabled/' /etc/selinux/config
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab

# Local name resolution for every cluster node.
cat >> /etc/hosts << EOF
192.168.13.10 master01
192.168.13.20 node01
192.168.13.30 node02
192.168.13.40 master02
192.168.13.50 nginx01
192.168.13.60 nginx02
EOF

# Kernel parameters required by kubernetes (sysctl files allow '#' comments,
# so the annotations below are kept in the generated file).
cat > /etc/sysctl.d/k8s.conf << EOF
#开启网桥模式,可将网桥的流量传递给iptables链
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
#关闭ipv6协议
net.ipv6.conf.all.disable_ipv6=1
net.ipv4.ip_forward=1
EOF
sysctl --system

# Load the ipvs kernel modules.
# grep -o: print only the matched part; "^[^.]*": everything before the first '.'
for i in $(ls /usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs | grep -o "^[^.]*"); do
  echo "$i"
  /sbin/modinfo -F filename "$i" >/dev/null 2>&1 && /sbin/modprobe "$i"
done

# Install and run a time synchronizer.
yum install ntpdate -y
ntpdate time.windows.com

# Install docker on ALL nodes.
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install -y docker-ce docker-ce-cli containerd.io
mkdir /etc/docker
# NOTE: daemon.json is strict JSON — comments are NOT allowed inside the file,
# or dockerd fails to parse it and will not start. Annotations for each key:
#   registry-mirrors : enable the registry mirror (image pull acceleration)
#   exec-opts        : use the systemd cgroup driver (simpler cpu management
#                      than cgroupfs, and required to match kubelet)
#   log-driver       : store container logs as json files
#   log-opts         : cap each log file at 100M
cat > /etc/docker/daemon.json <<EOF
{
  "registry-mirrors": ["https://6ijb8ubo.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": { "max-size": "100m" }
}
EOF
systemctl daemon-reload
systemctl restart docker.service
systemctl enable docker.service
# On success this prints: Cgroup Driver: systemd
docker info | grep "Cgroup Driver"

# Install the kubernetes yum repo on ALL nodes (for kubeadm/kubelet/kubectl).
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# Install kubelet (manages the node), kubeadm (generates the k8s install
# configuration) and kubectl (manages pods/nodes/etc.), then enable kubelet
# to start on boot.
yum install -y kubelet-1.20.11 kubeadm-1.20.11 kubectl-1.20.11
systemctl enable kubelet.service
# Environment initialization ends here.
1.2 master/node部署
#在 master节点查看初始化需要哪些镜像 kubeadm config images list cd /opt #上传v1.20.11.zip压缩包至/opt目录(里面包含了所有需要的镜像,若没有则需要去官网下载) unzip v1.20.11.zip -d /opt/k8s cd /opt/k8s/v1.20.11 for i in $(ls *.tar); do docker load -i $i; done #复制镜像和脚本到node节点 scp -r /opt/k8s root@192.168.13.20:/opt scp -r /opt/k8s root@192.168.13.30:/opt kubeadm config print init-defaults > /opt/kubeadm-config.yaml cd /opt vim kubeadm-config.yaml 11行:localAPIEndpoint: 12行:advertiseAddress: 192.168.13.10 34行:kubernetesVersion: v1.20.11 35行:networking: 36行:dnsDomain: cluster.local 37行:podSubnet: "10.244.0.0/16" 38行:serviceSubnet: 10.96.0.0/16 39行:scheduler: {} #末尾再添加以下内容(注意---也要加,因为是yaml文件,作为分隔符不可缺少) --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration mode: ipvs #配置文件到此结束 #调用配置文件生成 kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.log #执行下方三条命令(在上方命令的提示信息会出现) mkdir -p $HOME/.kube sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config sudo chown $(id -u):$(id -g) $HOME/.kube/config #查看 kubeadm-init 日志,kubernetes配置文件目录,存放ca等证书和密码的目录 #less kubeadm-init.log #ls /etc/kubernetes/ #ls /etc/kubernetes/pki #在所有node节点上导入镜像 cd /opt/k8s/v1.20.11 for i in $(ls *.tar); do docker load -i $i; done #在所有node节点上执行(上方命令提示信息的最后一条信息,很长一段) kubeadm join 192.168.13.10:6443 --token abcdef.0123456789abcdef \ --discovery-token-ca-cert-hash sha256:很长一段,每次不同 #成功则会出现:Run 'kubectl get nodes'...this node join...的提示内容 cd/etc/kubernetes/manifests vim kube-controller-manager.yaml 将所有127.0.0.1改为当前master地址 注释port vim kube-scheduler.yaml 将所有127.0.0.1改为当前master地址 注释port #重启kubelet服务 systemctl restart kubelet #查看集群状态 kubectl get cs #将配置文件复制过去,方式出现(The connection to the server localhost:8080 was refused) scp /etc/kubernetes/admin.conf root@192.168.13.20:/etc/kubernetes/admin.conf scp /etc/kubernetes/admin.conf root@192.168.13.30:/etc/kubernetes/admin.conf #所有node节点操作 echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile source ~/.bash_profile
1.3 flannel网络部署
cd /opt
# On ALL nodes: upload flannel.tar to /opt and load it.
docker load -i flannel.tar
# On master01: either upload kube-flannel.yml to /opt and apply it (lines
# 39-44 may need adjusting) ...
#cd /opt
#kubectl apply -f kube-flannel.yml
# ... or deploy the CNI network straight from the upstream manifest:
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# It may take a few seconds before the nodes report Ready.
kubectl get nodes
# Single-master k8s deployment ends here.
二、结语
- 负载均衡相关的 Pod 出现异常状态时,应首先检查节点上是否缺少对应的镜像,或镜像导入是否失败。