1.7、Deploy kube-controller-manager
- kube-controller-manager must be deployed on all master nodes.
1.7.0、Create the kube-controller-manager certificate signing request (CSR)
k8s-01:~ # cd /opt/k8s/ssl/
k8s-01:/opt/k8s/ssl # cat > kube-controller-manager-csr.json <<EOF
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "hosts": [
    "127.0.0.1",
    "192.168.72.39",
    "192.168.72.40",
    "192.168.72.41"
  ],
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "system:kube-controller-manager",
      "OU": "bandian"
    }
  ]
}
EOF
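The CN and O fields are both set to system:kube-controller-manager so that the API server's default RBAC rules grant this client the built-in system:kube-controller-manager ClusterRole, and the hosts list should contain every master IP. Optionally, a quick well-formedness check of the JSON (assuming jq is installed on the node):

k8s-01:/opt/k8s/ssl # jq . kube-controller-manager-csr.json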
1.7.1、Generate the kube-controller-manager certificate and private key
k8s-01:/opt/k8s/ssl # cfssl gencert -ca=/opt/k8s/ssl/ca.pem \
  -ca-key=/opt/k8s/ssl/ca-key.pem \
  -config=/opt/k8s/ssl/ca-config.json \
  -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
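This produces kube-controller-manager.pem, kube-controller-manager-key.pem, and kube-controller-manager.csr in the current directory. To confirm the subject and validity period of the signed certificate, a quick inspection with openssl (assuming it is installed) looks like:

k8s-01:/opt/k8s/ssl # openssl x509 -in kube-controller-manager.pem -noout -subject -dates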
1.7.2、Create the kube-controller-manager kubeconfig file
k8s-01:~ # cd /opt/k8s/ssl/
k8s-01:/opt/k8s/ssl # source /opt/k8s/bin/k8s-env.sh

"Set cluster parameters"
k8s-01:/opt/k8s/ssl # kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/k8s/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-controller-manager.kubeconfig

"Set client authentication parameters"
k8s-01:/opt/k8s/ssl # kubectl config set-credentials system:kube-controller-manager \
  --client-certificate=kube-controller-manager.pem \
  --client-key=kube-controller-manager-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-controller-manager.kubeconfig

"Set context parameters"
k8s-01:/opt/k8s/ssl # kubectl config set-context system:kube-controller-manager \
  --cluster=kubernetes \
  --user=system:kube-controller-manager \
  --kubeconfig=kube-controller-manager.kubeconfig

"Set the default context"
k8s-01:/opt/k8s/ssl # kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig
1.7.3、Configure kube-controller-manager to start via systemd
k8s-01:~ # cd /opt/k8s/conf/
k8s-01:/opt/k8s/conf # source /opt/k8s/bin/k8s-env.sh
k8s-01:/opt/k8s/conf # cat > kube-controller-manager.service <<EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
WorkingDirectory=${K8S_DIR}/kube-controller-manager
ExecStart=/opt/k8s/bin/kube-controller-manager \\
  --v=2 \\
  --cluster-name=kubernetes \\
  --profiling \\
  --logtostderr=true \\
  --leader-elect=true \\
  --bind-address=0.0.0.0 \\
  --allocate-node-cidrs=true \\
  --cluster-cidr=${CLUSTER_CIDR} \\
  --service-cluster-ip-range=${SERVICE_CIDR} \\
  --cluster-signing-cert-file=/etc/kubernetes/cert/ca.pem \\
  --cluster-signing-key-file=/etc/kubernetes/cert/ca-key.pem \\
  --root-ca-file=/etc/kubernetes/cert/ca.pem \\
  --service-account-private-key-file=/etc/kubernetes/cert/ca-key.pem \\
  --experimental-cluster-signing-duration=87600h0m0s \\
  --kubeconfig=/etc/kubernetes/cert/kube-controller-manager.kubeconfig \\
  --requestheader-allowed-names="" \\
  --requestheader-client-ca-file=/etc/kubernetes/cert/ca.pem \\
  --requestheader-extra-headers-prefix="X-Remote-Extra-" \\
  --requestheader-group-headers=X-Remote-Group \\
  --requestheader-username-headers=X-Remote-User \\
  --authorization-kubeconfig=/etc/kubernetes/cert/kube-controller-manager.kubeconfig \\
  --controllers=*,bootstrapsigner,tokencleaner \\
  --use-service-account-credentials=true
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
--controllers=*,bootstrapsigner,tokencleaner
  The list of controllers to enable; tokencleaner automatically cleans up expired Bootstrap tokens.
--profiling
  Enables profiling, so performance can be analyzed through the web endpoint host:port/debug/pprof/.
--experimental-cluster-signing-duration
  Validity period of the certificates signed during TLS Bootstrap.
--root-ca-file
  CA certificate placed into container ServiceAccounts, used to verify the kube-apiserver certificate.
--service-cluster-ip-range
  The Service cluster IP range; it must match the parameter of the same name on kube-apiserver.
--leader-elect=true
  Cluster run mode with leader election enabled: the node elected as leader does the work, while the other nodes stay in a blocked (standby) state.
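One flag in the unit file that is easy to overlook is --use-service-account-credentials=true: with it, each controller loop authenticates with its own service account in kube-system instead of the shared controller-manager identity, which relies on the default system:controller:* RBAC roles and bindings shipped with the API server. A quick sanity check that those defaults exist (not part of the deployment itself, just a verification sketch):

k8s-01:~ # kubectl get clusterroles | grep '^system:controller:' | head
k8s-01:~ # kubectl get clusterrolebindings | grep 'system:controller:' | head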
1.7.4、Distribute the kube-controller-manager certificates and files to the other nodes
#!/usr/bin/env bash
source /opt/k8s/bin/k8s-env.sh
for host in ${MASTER_IPS[@]}
do
  printf "\e[1;34m${host}\e[0m\n"
  scp /opt/k8s/conf/kube-controller-manager.service \
    ${host}:/etc/systemd/system/kube-controller-manager.service
  scp /opt/k8s/ssl/{kube-controller-manager*.pem,kube-controller-manager.kubeconfig} \
    ${host}:/etc/kubernetes/cert/
done
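Once the loop completes, it is worth confirming that every master actually received the unit file and the certificates; a minimal check, reusing the same environment script:

#!/usr/bin/env bash
source /opt/k8s/bin/k8s-env.sh
for host in ${MASTER_IPS[@]}
do
  printf "\e[1;34m${host}\e[0m\n"
  # list the systemd unit and the controller-manager certs/kubeconfig on each master
  ssh root@${host} "ls -l /etc/systemd/system/kube-controller-manager.service /etc/kubernetes/cert/kube-controller-manager*"
done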
1.7.5、Start the kube-controller-manager service
#!/usr/bin/env bash
source /opt/k8s/bin/k8s-env.sh
for host in ${MASTER_IPS[@]}
do
  printf "\e[1;34m${host}\e[0m\n"
  ssh root@${host} "mkdir -p ${K8S_DIR}/kube-controller-manager"
  ssh root@${host} "systemctl daemon-reload && \
    systemctl enable kube-controller-manager --now && \
    systemctl status kube-controller-manager | grep Active"
done
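If Active shows anything other than active (running), inspect the logs with journalctl -u kube-controller-manager on that node. Once the service is up, you can also probe the health endpoint on the secure port (10257) of each master; a small sketch, again using the env script (-k skips TLS verification of the self-signed serving certificate):

#!/usr/bin/env bash
source /opt/k8s/bin/k8s-env.sh
for host in ${MASTER_IPS[@]}
do
  printf "\e[1;34m${host}\e[0m\n"
  # a healthy controller manager answers "ok" on /healthz
  ssh root@${host} "curl -sk https://127.0.0.1:10257/healthz && echo"
done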
1.7.6、Check the kube-controller-manager listening ports
k8s-01:~ # ss -nltp | grep kube-contro
LISTEN   0   128   :::10252   :::*   users:(("kube-controller",pid=65221,fd=7))
LISTEN   0   128   :::10257   :::*   users:(("kube-controller",pid=65221,fd=8))
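Port 10257 is the secure HTTPS serving port, while 10252 is the older insecure HTTP port that this version still opens by default. Assuming the insecure port behaves as shown above, its metrics endpoint can be scraped without authentication, for example:

k8s-01:~ # curl -s http://127.0.0.1:10252/metrics | head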
1.7.7、Check the current leader
k8s-01:~ # kubectl get endpoints kube-controller-manager --namespace=kube-system -o yaml
apiVersion: v1
kind: Endpoints
metadata:
  annotations:
    control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"k8s-01_d04c6ed1-5048-4fe2-aaee-ed3043b24e6b","leaseDurationSeconds":15,"acquireTime":"2021-02-12T16:52:57Z","renewTime":"2021-02-12T16:53:07Z","leaderTransitions":0}'
  creationTimestamp: "2021-02-12T16:52:57Z"
  managedFields:
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          .: {}
          f:control-plane.alpha.kubernetes.io/leader: {}
    manager: kube-controller-manager
    operation: Update
    time: "2021-02-12T16:52:57Z"
  name: kube-controller-manager
  namespace: kube-system
  resourceVersion: "355"
  selfLink: /api/v1/namespaces/kube-system/endpoints/kube-controller-manager
  uid: fc7f643d-a71f-4a58-b66c-2edcacbda693
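The holderIdentity field in the leader annotation shows which master currently holds the lock (k8s-01 here). As a coarser cross-check, the componentstatuses API (deprecated, but still served in this release and checking the local controller-manager health port) should report the controller manager as Healthy:

k8s-01:~ # kubectl get componentstatuses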