Prerequisite environment:
Hostname    | IP address   | Role
k8s-init    | 192.168.1.11 | init node
k8s-master1 | 192.168.1.12 | master node
k8s-master2 | 192.168.1.13 | master node
k8s-node1   | 192.168.1.15 | worker node
All nodes
Set hostnames and hosts resolution
hostnamectl set-hostname k8s-init      # on k8s-init
hostnamectl set-hostname k8s-master1   # on k8s-master1
hostnamectl set-hostname k8s-master2   # on k8s-master2
hostnamectl set-hostname k8s-node1     # on k8s-node1
bash   # start a new shell so the prompt picks up the new hostname
cat >> /etc/hosts << EOF
192.168.1.11 k8s-init
192.168.1.12 k8s-master1
192.168.1.13 k8s-master2
192.168.1.15 k8s-node1
EOF
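A quick sanity check (optional): confirm each name resolves and answers from every node.
for h in k8s-init k8s-master1 k8s-master2 k8s-node1; do ping -c 1 $h; done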
Permanently disable the firewall, SELinux, and swap
systemctl stop firewalld
systemctl disable firewalld
setenforce 0
sed -ri 's#(SELINUX=).*#\1disabled#' /etc/selinux/config
swapoff -a
sed -i '/swap/s/^\(.*\)$/#\1/g' /etc/fstab
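Verify the changes took effect (an illustrative check; getenforce reports Permissive until the next reboot):
getenforce
free -m   # the Swap line should show all zeros after swapoff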
Adjust kernel parameters and load the IPVS modules
cat << EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
modprobe br_netfilter
sysctl -p /etc/sysctl.d/k8s.conf
cat > /etc/sysconfig/modules/ipvs.modules << EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
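Confirm the IPVS modules are loaded (an illustrative check):
lsmod | grep -e ip_vs -e nf_conntrack_ipv4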
k8s-init node
Install dependencies and set up the httpd service
cat << EOF > /etc/yum.repos.d/local.repo
[local]
name=local
baseurl=file:///opt/k8s-installer/docker-ce
gpgcheck=0
enabled=1
EOF
yum -y install vim lrzsz unzip
cd /opt/
unzip k8s-installer.zip   # k8s-installer.zip must already have been uploaded to /opt/
yum install -y httpd --disablerepo=* --enablerepo=local
sed -i 's/Listen 80/Listen 60080/g' /etc/httpd/conf/httpd.conf
cp -r /opt/k8s-installer/docker-ce/ /var/www/html/
systemctl enable httpd && systemctl start httpd
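The repository should now be reachable over HTTP (an illustrative check):
curl -s http://192.168.1.11:60080/docker-ce/ | head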
All nodes
Install and configure Docker
cat << EOF > /etc/yum.repos.d/local-http.repo
[local-http]
name=local-http
baseurl=http://192.168.1.11:60080/docker-ce
gpgcheck=0
enabled=1
EOF
mkdir -p /etc/docker
cat << EOF > /etc/docker/daemon.json
{
"insecure-registries": [
"192.168.1.11:65000"
],
"storage-driver": "overlay2"
}
EOF
yum install -y docker-ce docker-ce-cli containerd.io --disablerepo=* --enablerepo=local-http
systemctl enable docker && systemctl start docker
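Confirm Docker came up with the expected settings (an illustrative check; docker info lists the storage driver and insecure registries):
docker info | grep -i 'storage driver'
docker info | grep -i -A2 'insecure registries'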
k8s-init node
Set up the image registry and HAProxy
docker load -i /opt/k8s-installer/registry-image.tar
docker images
docker run -d --restart=always --name pkg-registry -p 65000:5000 -v /opt/k8s-installer/registry/:/var/lib/registry index.alauda.cn/alaudaorg/distribution:latest
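The registry should now answer on port 65000 (an illustrative check using the standard registry v2 API):
curl -s http://192.168.1.11:65000/v2/_catalog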
rpm -Uvh /var/www/html/docker-ce/openssl-*
yum install -y haproxy --disablerepo=* --enablerepo=local-http
Configure haproxy
cat << EOF > /etc/haproxy/haproxy.cfg
global
    log             127.0.0.1 local2
    chroot          /var/lib/haproxy
    pidfile         /var/run/haproxy.pid
    maxconn         4000
    user            haproxy
    group           haproxy
    daemon
    stats socket    /var/lib/haproxy/stats

defaults
    log                     global
    option                  dontlognull
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000

frontend k8s
    bind *:7443
    mode tcp
    default_backend k8s-master

backend k8s-master
    mode tcp
    balance roundrobin
    server master1 192.168.1.12:6443 check maxconn 2000
    server master2 192.168.1.13:6443 check maxconn 2000
EOF
Start the haproxy service
systemctl enable haproxy && systemctl start haproxy
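Confirm haproxy is listening on port 7443 (an illustrative check):
ss -lntp | grep 7443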
k8s-master1, k8s-master2, k8s-node1
Deploy the Kubernetes cluster
rpm -Uvh http://192.168.1.11:60080/docker-ce/libnetfilter_conntrack-1.0.6-1.el7_3.x86_64.rpm
yum install -y kubeadm kubectl kubelet --disablerepo=* --enablerepo=local-http
systemctl enable kubelet
Configure the kubelet service (EOF is quoted so the $KUBELET_* variables are written literally, not expanded by the shell)
cat << 'EOF' > /etc/systemd/system/kubelet.service
[Unit]
Description=kubelet: The Kubernetes Node Agent
Documentation=https://kubernetes.io/docs/
[Service]
#ExecStart=/usr/bin/kubelet
Environment="KUBELET_SYSTEM_PODS_ARGS=--pod-manifest-path=/etc/kubernetes/man ifests --allow-privileged=true"
Environment="KUBELET_INFRA_CONTAINER_IMAGE=--pod-infra-container-image=192.168.1.11:60080/k8s/pause:3.1"
ExecStart=/usr/bin/kubelet $KUBELET_SYSTEM_PODS_ARGS $KUBELET_INFRA_CONTAINER_IMAGE
Restart=always
StartLimitInterval=0
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
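Reload systemd so the new unit file takes effect:
systemctl daemon-reload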
k8s-master1
Create the kubeadm init configuration
cat << EOF > /opt/kubeadm.conf
apiVersion: kubeadm.k8s.io/v1beta1
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.1.12
  bindPort: 6443
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta1
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "192.168.1.11:7443"
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: 192.168.1.11:65000/k8s
kind: ClusterConfiguration
kubernetesVersion: v1.13.3
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
scheduler: {}
EOF
kubeadm config images list --config /opt/kubeadm.conf
kubeadm config images pull --config /opt/kubeadm.conf
Initialize the cluster (save the kubeadm join command printed at the end of the output; it is needed when joining the other nodes)
kubeadm init --config /opt/kubeadm.conf
Configure kubectl credentials
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
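kubectl should now respond; the master will show NotReady until the Flannel plugin is installed later:
kubectl get nodes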
k8s-master2 node
Passwordless SSH (set up in both directions)
ssh-keygen -t rsa        # on k8s-master2; press Enter at every prompt
ssh-copy-id k8s-master1
ssh-keygen -t rsa        # on k8s-master1; press Enter at every prompt
ssh-copy-id k8s-master2
Add the other master node to the cluster
mkdir -p /etc/kubernetes/pki/etcd
scp root@k8s-master1:/etc/kubernetes/pki/ca.crt /etc/kubernetes/pki/
scp root@k8s-master1:/etc/kubernetes/pki/ca.key /etc/kubernetes/pki/
scp root@k8s-master1:/etc/kubernetes/pki/sa.key /etc/kubernetes/pki/
scp root@k8s-master1:/etc/kubernetes/pki/sa.pub /etc/kubernetes/pki/
scp root@k8s-master1:/etc/kubernetes/pki/front-proxy-ca.crt /etc/kubernetes/pki/
scp root@k8s-master1:/etc/kubernetes/pki/front-proxy-ca.key /etc/kubernetes/pki/
scp root@k8s-master1:/etc/kubernetes/pki/etcd/ca.crt /etc/kubernetes/pki/etcd/
scp root@k8s-master1:/etc/kubernetes/pki/etcd/ca.key /etc/kubernetes/pki/etcd/
scp root@k8s-master1:/etc/kubernetes/admin.conf /etc/kubernetes/admin.conf
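The six pki files can also be copied in one loop (an equivalent sketch; the etcd certs and admin.conf still use the commands above):
for f in ca.crt ca.key sa.key sa.pub front-proxy-ca.crt front-proxy-ca.key; do
    scp root@k8s-master1:/etc/kubernetes/pki/$f /etc/kubernetes/pki/
done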
Run the join command
# Use the kubeadm join command printed by kubeadm init on k8s-master1, and be sure to append the --experimental-control-plane flag
For example: kubeadm join 192.168.1.11:7443 --token xxx --discovery-token-ca-cert-hash xxx --experimental-control-plane
Configure kubectl on this master
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
k8s-node1 node
Add the worker node to the cluster
# Use the kubeadm join command printed by kubeadm init on k8s-master1
For example: kubeadm join 192.168.1.11:7443 --token xxx --discovery-token-ca-cert-hash xxx
If the join fails (and the prerequisites above were done correctly), reset and re-initialize:
kubeadm reset                               # run on every host being reset
kubeadm init --config /opt/kubeadm.conf    # re-run the init on k8s-master1
k8s-master1
Install the Flannel plugin and verify the cluster
scp root@k8s-init:/opt/k8s-installer/kube-flannel.yml /opt
sed -i "s#quay.io/coreos#192.168.1.11:65000/k8s#g" /opt/kube-flannel.yml
kubectl create -f /opt/kube-flannel.yml
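Wait for the Flannel pods to come up before checking node status (an illustrative check):
kubectl get pods -n kube-system | grep flannel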
kubectl taint node k8s-master1 node-role.kubernetes.io/master:NoSchedule-
kubectl taint node k8s-master2 node-role.kubernetes.io/master:NoSchedule-
kubectl get nodes
kubectl run --generator=run-pod/v1 test-nginx --image=192.168.1.11:65000/k8s/nginx
kubectl get pods -o wide |grep test-nginx
scp /run/flannel/subnet.env k8s-node1:/run/flannel/   # copy flannel's subnet.env in case the worker's flannel pod has not generated it yet
Verify the service responds
curl <Pod IP>   # use the Pod IP shown by the previous command
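A final end-to-end look at the cluster (illustrative):
kubectl get nodes
kubectl get pods --all-namespaces -o wide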