一、Extend the validity period of the k8s certificates
Check the validity period of the apiserver certificate (certificates issued by kubeadm are valid for one year by default):
[root@xuegod63 ~]# openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -text |grep Not
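If kubeadm is available, you can also list the expiration dates of every kubeadm-managed certificate in one command. A minimal sketch, assuming kubeadm v1.20 or newer (older releases use kubeadm alpha certs check-expiration instead):
[root@xuegod63 ~]# kubeadm certs check-expiration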
Extend the certificate expiration time
1. Upload the update-kubeadm-cert.sh file to the xuegod63 node
vim update-kubeadm-cert.sh
#!/bin/bash

set -o errexit
set -o pipefail
# set -o xtrace

log::err() {
  printf "[$(date +'%Y-%m-%dT%H:%M:%S.%N%z')]: \033[31mERROR: \033[0m$@\n"
}

log::info() {
  printf "[$(date +'%Y-%m-%dT%H:%M:%S.%N%z')]: \033[32mINFO: \033[0m$@\n"
}

log::warning() {
  printf "[$(date +'%Y-%m-%dT%H:%M:%S.%N%z')]: \033[33mWARNING: \033[0m$@\n"
}

check_file() {
  if [[ ! -r ${1} ]]; then
    log::err "can not find ${1}"
    exit 1
  fi
}

# get x509v3 subject alternative name from the old certificate
cert::get_subject_alt_name() {
  local cert=${1}.crt
  check_file "${cert}"
  local alt_name=$(openssl x509 -text -noout -in ${cert} | grep -A1 'Alternative' | tail -n1 | sed 's/[[:space:]]*Address//g')
  printf "${alt_name}\n"
}

# get subject from the old certificate
cert::get_subj() {
  local cert=${1}.crt
  check_file "${cert}"
  local subj=$(openssl x509 -text -noout -in ${cert} | grep "Subject:" | sed 's/Subject:/\//g;s/\,/\//;s/[[:space:]]//g')
  printf "${subj}\n"
}

cert::backup_file() {
  local file=${1}
  if [[ ! -e ${file}.old-$(date +%Y%m%d) ]]; then
    cp -rp ${file} ${file}.old-$(date +%Y%m%d)
    log::info "backup ${file} to ${file}.old-$(date +%Y%m%d)"
  else
    log::warning "does not backup, ${file}.old-$(date +%Y%m%d) already exists"
  fi
}

# generate certificate whit client, server or peer
# Args:
#   $1 (the name of certificate)
#   $2 (the type of certificate, must be one of client, server, peer)
#   $3 (the subject of certificates)
#   $4 (the validity of certificates) (days)
#   $5 (the x509v3 subject alternative name of certificate when the type of certificate is server or peer)
cert::gen_cert() {
  local cert_name=${1}
  local cert_type=${2}
  local subj=${3}
  local cert_days=${4}
  local alt_name=${5}
  local cert=${cert_name}.crt
  local key=${cert_name}.key
  local csr=${cert_name}.csr
  local csr_conf="distinguished_name = dn\n[dn]\n[v3_ext]\nkeyUsage = critical, digitalSignature, keyEncipherment\n"

  check_file "${key}"
  check_file "${cert}"

  # backup certificate when certificate not in ${kubeconf_arr[@]}
  # kubeconf_arr=("controller-manager.crt" "scheduler.crt" "admin.crt" "kubelet.crt")
  # if [[ ! "${kubeconf_arr[@]}" =~ "${cert##*/}" ]]; then
  #   cert::backup_file "${cert}"
  # fi

  case "${cert_type}" in
    client)
      openssl req -new -key ${key} -subj "${subj}" -reqexts v3_ext \
        -config <(printf "${csr_conf} extendedKeyUsage = clientAuth\n") -out ${csr}
      openssl x509 -in ${csr} -req -CA ${CA_CERT} -CAkey ${CA_KEY} -CAcreateserial -extensions v3_ext \
        -extfile <(printf "${csr_conf} extendedKeyUsage = clientAuth\n") -days ${cert_days} -out ${cert}
      log::info "generated ${cert}"
    ;;
    server)
      openssl req -new -key ${key} -subj "${subj}" -reqexts v3_ext \
        -config <(printf "${csr_conf} extendedKeyUsage = serverAuth\nsubjectAltName = ${alt_name}\n") -out ${csr}
      openssl x509 -in ${csr} -req -CA ${CA_CERT} -CAkey ${CA_KEY} -CAcreateserial -extensions v3_ext \
        -extfile <(printf "${csr_conf} extendedKeyUsage = serverAuth\nsubjectAltName = ${alt_name}\n") -days ${cert_days} -out ${cert}
      log::info "generated ${cert}"
    ;;
    peer)
      openssl req -new -key ${key} -subj "${subj}" -reqexts v3_ext \
        -config <(printf "${csr_conf} extendedKeyUsage = serverAuth, clientAuth\nsubjectAltName = ${alt_name}\n") -out ${csr}
      openssl x509 -in ${csr} -req -CA ${CA_CERT} -CAkey ${CA_KEY} -CAcreateserial -extensions v3_ext \
        -extfile <(printf "${csr_conf} extendedKeyUsage = serverAuth, clientAuth\nsubjectAltName = ${alt_name}\n") -days ${cert_days} -out ${cert}
      log::info "generated ${cert}"
    ;;
    *)
      log::err "unknow, unsupported etcd certs type: ${cert_type}, supported type: client, server, peer"
      exit 1
  esac

  rm -f ${csr}
}

cert::update_kubeconf() {
  local cert_name=${1}
  local kubeconf_file=${cert_name}.conf
  local cert=${cert_name}.crt
  local key=${cert_name}.key

  # generate certificate
  check_file ${kubeconf_file}
  # get the key from the old kubeconf
  grep "client-key-data" ${kubeconf_file} | awk {'print$2'} | base64 -d > ${key}
  # get the old certificate from the old kubeconf
  grep "client-certificate-data" ${kubeconf_file} | awk {'print$2'} | base64 -d > ${cert}
  # get subject from the old certificate
  local subj=$(cert::get_subj ${cert_name})
  cert::gen_cert "${cert_name}" "client" "${subj}" "${CAER_DAYS}"
  # get certificate base64 code
  local cert_base64=$(base64 -w 0 ${cert})

  # backup kubeconf
  # cert::backup_file "${kubeconf_file}"

  # set certificate base64 code to kubeconf
  sed -i 's/client-certificate-data:.*/client-certificate-data: '${cert_base64}'/g' ${kubeconf_file}

  log::info "generated new ${kubeconf_file}"
  rm -f ${cert}
  rm -f ${key}

  # set config for kubectl
  if [[ ${cert_name##*/} == "admin" ]]; then
    mkdir -p ~/.kube
    cp -fp ${kubeconf_file} ~/.kube/config
    log::info "copy the admin.conf to ~/.kube/config for kubectl"
  fi
}

cert::update_etcd_cert() {
  PKI_PATH=${KUBE_PATH}/pki/etcd
  CA_CERT=${PKI_PATH}/ca.crt
  CA_KEY=${PKI_PATH}/ca.key

  check_file "${CA_CERT}"
  check_file "${CA_KEY}"

  # generate etcd server certificate
  # /etc/kubernetes/pki/etcd/server
  CART_NAME=${PKI_PATH}/server
  subject_alt_name=$(cert::get_subject_alt_name ${CART_NAME})
  cert::gen_cert "${CART_NAME}" "peer" "/CN=etcd-server" "${CAER_DAYS}" "${subject_alt_name}"

  # generate etcd peer certificate
  # /etc/kubernetes/pki/etcd/peer
  CART_NAME=${PKI_PATH}/peer
  subject_alt_name=$(cert::get_subject_alt_name ${CART_NAME})
  cert::gen_cert "${CART_NAME}" "peer" "/CN=etcd-peer" "${CAER_DAYS}" "${subject_alt_name}"

  # generate etcd healthcheck-client certificate
  # /etc/kubernetes/pki/etcd/healthcheck-client
  CART_NAME=${PKI_PATH}/healthcheck-client
  cert::gen_cert "${CART_NAME}" "client" "/O=system:masters/CN=kube-etcd-healthcheck-client" "${CAER_DAYS}"

  # generate apiserver-etcd-client certificate
  # /etc/kubernetes/pki/apiserver-etcd-client
  check_file "${CA_CERT}"
  check_file "${CA_KEY}"
  PKI_PATH=${KUBE_PATH}/pki
  CART_NAME=${PKI_PATH}/apiserver-etcd-client
  cert::gen_cert "${CART_NAME}" "client" "/O=system:masters/CN=kube-apiserver-etcd-client" "${CAER_DAYS}"

  # restart etcd
  docker ps | awk '/k8s_etcd/{print$1}' | xargs -r -I '{}' docker restart {} || true
  log::info "restarted etcd"
}

cert::update_master_cert() {
  PKI_PATH=${KUBE_PATH}/pki
  CA_CERT=${PKI_PATH}/ca.crt
  CA_KEY=${PKI_PATH}/ca.key

  check_file "${CA_CERT}"
  check_file "${CA_KEY}"

  # generate apiserver server certificate
  # /etc/kubernetes/pki/apiserver
  CART_NAME=${PKI_PATH}/apiserver
  subject_alt_name=$(cert::get_subject_alt_name ${CART_NAME})
  cert::gen_cert "${CART_NAME}" "server" "/CN=kube-apiserver" "${CAER_DAYS}" "${subject_alt_name}"

  # generate apiserver-kubelet-client certificate
  # /etc/kubernetes/pki/apiserver-kubelet-client
  CART_NAME=${PKI_PATH}/apiserver-kubelet-client
  cert::gen_cert "${CART_NAME}" "client" "/O=system:masters/CN=kube-apiserver-kubelet-client" "${CAER_DAYS}"

  # generate kubeconf for controller-manager,scheduler,kubectl and kubelet
  # /etc/kubernetes/controller-manager,scheduler,admin,kubelet.conf
  cert::update_kubeconf "${KUBE_PATH}/controller-manager"
  cert::update_kubeconf "${KUBE_PATH}/scheduler"
  cert::update_kubeconf "${KUBE_PATH}/admin"
  # check kubelet.conf
  # https://github.com/kubernetes/kubeadm/issues/1753
  set +e
  grep kubelet-client-current.pem /etc/kubernetes/kubelet.conf > /dev/null 2>&1
  kubelet_cert_auto_update=$?
  set -e
  if [[ "$kubelet_cert_auto_update" == "0" ]]; then
    log::warning "does not need to update kubelet.conf"
  else
    cert::update_kubeconf "${KUBE_PATH}/kubelet"
  fi

  # generate front-proxy-client certificate
  # use front-proxy-client ca
  CA_CERT=${PKI_PATH}/front-proxy-ca.crt
  CA_KEY=${PKI_PATH}/front-proxy-ca.key
  check_file "${CA_CERT}"
  check_file "${CA_KEY}"
  CART_NAME=${PKI_PATH}/front-proxy-client
  cert::gen_cert "${CART_NAME}" "client" "/CN=front-proxy-client" "${CAER_DAYS}"

  # restart apiserve, controller-manager, scheduler and kubelet
  docker ps | awk '/k8s_kube-apiserver/{print$1}' | xargs -r -I '{}' docker restart {} || true
  log::info "restarted kube-apiserver"
  docker ps | awk '/k8s_kube-controller-manager/{print$1}' | xargs -r -I '{}' docker restart {} || true
  log::info "restarted kube-controller-manager"
  docker ps | awk '/k8s_kube-scheduler/{print$1}' | xargs -r -I '{}' docker restart {} || true
  log::info "restarted kube-scheduler"
  systemctl restart kubelet
  log::info "restarted kubelet"
}

main() {
  local node_tpye=$1

  KUBE_PATH=/etc/kubernetes
  CAER_DAYS=36500

  # backup $KUBE_PATH to $KUBE_PATH.old-$(date +%Y%m%d)
  cert::backup_file "${KUBE_PATH}"

  case ${node_tpye} in
    etcd)
      # update etcd certificates
      cert::update_etcd_cert
    ;;
    master)
      # update master certificates and kubeconf
      cert::update_master_cert
    ;;
    all)
      # update etcd certificates
      cert::update_etcd_cert
      # update master certificates and kubeconf
      cert::update_master_cert
    ;;
    *)
      log::err "unknow, unsupported certs type: ${cert_type}, supported type: all, etcd, master"
      printf "Documentation: https://github.com/yuyicai/update-kube-cert
  example:
    '\033[32m./update-kubeadm-cert.sh all\033[0m' update all etcd certificates, master certificates and kubeconf
      /etc/kubernetes
      ├── admin.conf
      ├── controller-manager.conf
      ├── scheduler.conf
      ├── kubelet.conf
      └── pki
          ├── apiserver.crt
          ├── apiserver-etcd-client.crt
          ├── apiserver-kubelet-client.crt
          ├── front-proxy-client.crt
          └── etcd
              ├── healthcheck-client.crt
              ├── peer.crt
              └── server.crt

    '\033[32m./update-kubeadm-cert.sh etcd\033[0m' update only etcd certificates
      /etc/kubernetes
      └── pki
          ├── apiserver-etcd-client.crt
          └── etcd
              ├── healthcheck-client.crt
              ├── peer.crt
              └── server.crt

    '\033[32m./update-kubeadm-cert.sh master\033[0m' update only master certificates and kubeconf
      /etc/kubernetes
      ├── admin.conf
      ├── controller-manager.conf
      ├── scheduler.conf
      ├── kubelet.conf
      └── pki
          ├── apiserver.crt
          ├── apiserver-kubelet-client.crt
          └── front-proxy-client.crt
"
      exit 1
  esac
}

main "$@"
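Note that the script does not touch the CA certificates themselves: it only re-signs new leaf certificates with the existing CA files under /etc/kubernetes/pki (kubeadm issues the CA with a 10-year validity by default), so the CA must still be valid. A quick check, assuming the default kubeadm layout:
[root@xuegod63 ~]# openssl x509 -in /etc/kubernetes/pki/ca.crt -noout -enddate
[root@xuegod63 ~]# openssl x509 -in /etc/kubernetes/pki/etcd/ca.crt -noout -enddate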
2. Run the following on xuegod63:
1) Make the update-kubeadm-cert.sh script executable
[root@xuegod63 ~]#chmod +x update-kubeadm-cert.sh
2) Run the command below to re-sign the certificates and extend their validity to 100 years
[root@xuegod63 ~]# ./update-kubeadm-cert.sh all
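On clusters with more than one control-plane node, copy the script to each control-plane node and run it there as well, since it only renews the certificates on the local machine. Also note that the script restarts the control-plane components with docker restart; if the cluster runs on containerd instead of Docker (the ctr commands in the next section suggest it does), those docker calls do nothing and the components keep serving with the old certificates. A minimal sketch of restarting them with crictl instead, assuming crictl is installed and pointed at the containerd socket (the kubelet recreates the static-pod containers automatically after they are stopped):
[root@xuegod63 ~]# crictl ps -q --name kube-apiserver | xargs -r crictl stop
[root@xuegod63 ~]# crictl ps -q --name kube-controller-manager | xargs -r crictl stop
[root@xuegod63 ~]# crictl ps -q --name kube-scheduler | xargs -r crictl stop
[root@xuegod63 ~]# crictl ps -q --name etcd | xargs -r crictl stop
[root@xuegod63 ~]# systemctl restart kubelet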
3) On the xuegod63 node, check whether the Pods are healthy; if the query returns data, the certificates were re-issued successfully
[root@xuegod63 ~]# kubectl get pods -n kube-system
All of the kube-system Pods should be in the Running state.
Verify that the certificate validity has been extended to 100 years:
[root@xuegod63 ~]# openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -text |grep Not
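To confirm that every renewed certificate, not just the apiserver one, carries the new expiration date, a small loop over /etc/kubernetes/pki works; a sketch assuming the default kubeadm certificate layout:
[root@xuegod63 ~]# for crt in $(find /etc/kubernetes/pki -name '*.crt'); do echo "${crt}"; openssl x509 -in "${crt}" -noout -enddate; done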
二、Test DNS resolution and network connectivity in the k8s cluster
# Upload busybox-1-28.tar.gz to the xuegod64 and xuegod62 nodes and import the image manually:
[root@xuegod64 ~]# ctr -n=k8s.io images import busybox-1-28.tar.gz
[root@xuegod62 ~]# ctr -n=k8s.io images import busybox-1-28.tar.gz
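As an optional check before creating the Pod, the following should list the imported image in containerd's k8s.io namespace (not part of the original steps):
[root@xuegod64 ~]# ctr -n=k8s.io images ls | grep busybox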
Download link: https://pan.baidu.com/s/17e6AUn4Z-qPyTv6WnAjhtw?pwd=qrhq Extraction code: qrhq
Create a pod from this image, then ping Baidu from inside the pod to check whether external network access works.
[root@xuegod63 ~]# kubectl run busybox --image busybox:1.28 --restart=Never --rm -it -- sh
/ # ping www.baidu.com
PING www.baidu.com (39.156.66.18): 56 data bytes
64 bytes from 39.156.66.18: seq=0 ttl=127 time=39.3 ms
# The ping succeeds, so the pod can reach the external network and the Calico network plugin is working.
/ # nslookup kubernetes.default.svc.cluster.local
Server: 10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
Name: kubernetes.default.svc.cluster.local
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
The output above shows that the CoreDNS service in the cluster is resolving names correctly.
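If the lookup fails instead, a reasonable first check is whether the CoreDNS Pods and the kube-dns Service are up; a sketch assuming the standard kubeadm labels and Service name:
[root@xuegod63 ~]# kubectl get pods -n kube-system -l k8s-app=kube-dns -o wide
[root@xuegod63 ~]# kubectl get svc -n kube-system kube-dns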