Installing Kubernetes 1.24.2 with kubeadm on containerd

Preface

The installation below follows the official Kubernetes documentation.

1 Preparing the environment (unless otherwise noted, run these steps on every node)

1.1 Operating system

For this installation I used three Ubuntu 20.04 virtual machines, each with 4 vCPUs, 4 GB of RAM, and a 40 GB system disk (OS installation steps omitted).

root@master:~# cat /etc/os-release 
NAME="Ubuntu"
VERSION="20.04.3 LTS (Focal Fossa)"
ID=ubuntu
ID_LIKE=debian
PRETTY_NAME="Ubuntu 20.04.3 LTS"
VERSION_ID="20.04"
HOME_URL="https://www.ubuntu.com/"
SUPPORT_URL="https://help.ubuntu.com/"
BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
VERSION_CODENAME=focal
UBUNTU_CODENAME=focal

1.2 Disable the firewall

Ubuntu does not ship SELinux; its firewall service is ufw. Disable the firewall and prevent it from starting at boot:

root@master:~# systemctl disable ufw
Synchronizing state of ufw.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install disable ufw
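
Note that `systemctl disable` on its own only removes the boot-time autostart; if ufw is currently running you also want to stop it. A minimal sketch:

# stop the running firewall now and keep it off across reboots
systemctl disable --now ufw
# alternatively, ufw's own command disables it both now and at boot
ufw disable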

1.3 Configure time synchronization (online method)

Install ntpdate and sync against Alibaba Cloud's time server:

root@master:~# apt install -y ntpdate
root@master:~# ntpdate  time1.aliyun.com
23 Jul 05:58:21 ntpdate[1433667]: adjust time server 203.107.6.88 offset -0.019383 sec
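
ntpdate is a one-shot sync; to keep the clocks aligned over time you could, for example, re-run it from cron (illustrative sketch; the ntpdate path may differ on your system):

# append an hourly one-shot sync against the same time server
(crontab -l 2>/dev/null; echo '0 * * * * /usr/sbin/ntpdate time1.aliyun.com') | crontab -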

1.4 Set hostnames and the hosts file

I named the three hosts master, node1, and node2:

root@master:~# hostnamectl set-hostname --static master   # --static can be omitted
root@master:~# cat /etc/hosts
127.0.0.1 localhost
10.10.21.105 master
10.10.21.108 node1
10.10.21.107 node2
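
The other two nodes need the same treatment; a sketch using the hostnames and IPs above:

# on node1 and node2 respectively
hostnamectl set-hostname node1
hostnamectl set-hostname node2
# append the same entries to /etc/hosts on every node so the names resolve everywhere
cat >> /etc/hosts <<EOF
10.10.21.105 master
10.10.21.108 node1
10.10.21.107 node2
EOF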

1.5 Set up passwordless SSH on the master node

To simplify later steps, set up passwordless SSH from master to the other nodes:

root@master:~# ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa): 
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:GrbsmJxp/zUZgHjSHMw3Qf/gmUGWKcTriPPj8kNO+GA root@zhuzhu
The key's randomart image is:
+---[RSA 2048]----+
|     o.++.oo     |
|     +oo=+o      |
|    o =..++      |
|     o  .o *     |
|     oooS = .    |
|    Eo++.  o     |
|   . O+   +      |
|   .o**  . .     |
|   .*==+.        |
+----[SHA256]-----+
root@master:~# ssh-copy-id node1
root@master:~# ssh-copy-id node2
###################################################
# To skip the long interactive output, you can run this one-liner instead
ssh-keygen -t rsa -b 2048 -P '' -q -f ~/.ssh/id_rsa

1.6 Disable swap and prevent it from mounting at boot

Kubernetes requires swap to be disabled on worker nodes; here I simply disabled it on all three machines.

root@master:~# swapoff -a
root@master:~# vim /etc/fstab  # this step comments out the swap entry; my VM happens to have no swap entry in fstab at all
root@master:~# free -m
              total        used        free      shared  buff/cache   available
Mem:           3913        1161         263           3        2488        2385
Swap:             0           0           0
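
If your fstab does contain a swap entry, a one-liner can comment it out instead of editing by hand (a sketch; it prefixes every line containing a swap field with #):

# comment out any swap mounts so the change survives a reboot
sed -ri '/\sswap\s/s/^/#/' /etc/fstab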

1.7 Load kernel modules

cat >> /etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
nf_conntrack # on kernels older than 4.18, change this line to nf_conntrack_ipv4
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF
cat >> /etc/modules-load.d/k8s.conf <<EOF
overlay
br_netfilter
EOF
sudo modprobe overlay
sudo modprobe br_netfilter
# set the required sysctl parameters; they persist across reboots
cat > /etc/sysctl.d/k8s.conf <<EOF 
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
sysctl --system  # apply the sysctl parameters without rebooting
modprobe bridge  # if the command above errors, try loading the bridge module first, then rerun sysctl --system
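
To confirm the modules actually loaded, a quick check (illustrative):

# verify that the bridge/netfilter and IPVS modules are present
lsmod | grep -e br_netfilter -e overlay -e ip_vs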

Commonly tuned kernel parameters, explained:

net.ipv4.ip_forward = 1 # 0 means IP forwarding is disabled; 1 means it is enabled.
net.bridge.bridge-nf-call-iptables = 1 # packets forwarded by an L2 bridge also pass through the iptables FORWARD rules; without this, L3 iptables rules can fail to filter L2 frames
net.bridge.bridge-nf-call-ip6tables = 1 # whether IPv6 packets are filtered in the ip6tables chains
fs.may_detach_mounts = 1 # should be set to 1 when the system runs containers
vm.overcommit_memory=1
# 0: the kernel checks whether enough free memory is available; if so the allocation succeeds, otherwise it fails and the error is returned to the process.
# 1: the kernel allows allocating all physical memory, regardless of the current memory state.
# 2: the kernel allows allocating more than the sum of all physical memory and swap space.
vm.panic_on_oom=0
# OOM is short for out of memory: the state where memory is exhausted and cannot be allocated. How the kernel reacts to OOM is governed by this parameter.
# 0: on OOM, start the OOM killer.
# 1: on OOM, the kernel may panic (rebooting the system) or may start the OOM killer.
# 2: on OOM, force a kernel panic, i.e. the kernel crashes and the system reboots.
fs.inotify.max_user_watches=89100 # maximum number of inotify watches a single user may register at once (watches usually target directories, so this bounds how many directories one user can monitor simultaneously)
fs.file-max=52706963 # system-wide maximum number of open files across all processes
fs.nr_open=52706963 # maximum number of files a single process may allocate
net.netfilter.nf_conntrack_max=2310720 # size of the connection-tracking table; a recommended sizing is CONNTRACK_MAX = RAMSIZE (in bytes) / 16384 / (x / 32), satisfying nf_conntrack_max = 4 * nf_conntrack_buckets; default 262144
net.ipv4.tcp_keepalive_time = 600 # keepalive idle time, i.e. how long a connection stays idle before keepalive probing starts; default 7200 s (2 hours)
net.ipv4.tcp_keepalive_probes = 3 # number of keepalive probes sent without a reply after tcp_keepalive_time before the connection is dropped; default 9
net.ipv4.tcp_keepalive_intvl = 15 # interval between individual keepalive probes; default 75 s
net.ipv4.tcp_max_tw_buckets = 36000 # middle proxies such as Nginx should watch this value: once ports are fully exhausted the service misbehaves, and capping TIME_WAIT sockets lowers the odds of that and buys time to react
net.ipv4.tcp_tw_reuse = 1 # affects clients only; when enabled, a client can reuse TIME_WAIT sockets within 1 s
net.ipv4.tcp_max_orphans = 327680 # maximum number of sockets not attached to any process that the system will handle; worth watching when you need to establish large numbers of connections quickly
net.ipv4.tcp_orphan_retries = 3
# relevant when many sockets sit in FIN-WAIT-1: a transmitted FIN can be lost, and retransmissions back off exponentially (2 s, 4 s, ...; exponential backoff since the 2.6.35 kernels); tcp_orphan_retries caps the number of retries
net.ipv4.tcp_syncookies = 1 # switch for SYN cookies, which defend against some SYN flood attacks; tcp_synack_retries and tcp_syn_retries define the SYN retry counts
net.ipv4.tcp_max_syn_backlog = 16384 # maximum request queue for incoming SYN packets, i.e. the half-open connection limit; default 1024; raising it clearly helps heavily loaded servers
net.ipv4.ip_conntrack_max = 65536 # legacy cap on the number of tracked TCP connections; default 65536
net.ipv4.tcp_timestamps = 0 # with iptables NAT, enabled TCP timestamps can cause odd failures (an internal host can ping a domain but curl to it fails), hence disabled here
net.core.somaxconn = 16384 # kernel cap on a socket's listen() backlog. The backlog is the queue of requests that are not yet handled or established; the server drains it as it processes them. If the server is too slow and the queue fills up, new requests are refused.

1.8 Install containerd

containerd can be installed straight from Docker's apt repository; for a faster and easier install I use the Alibaba Cloud mirror here.

Follow the mirror's help page to add the apt source; after adding it, run apt update and you can install the containerd package:

# step 1: install some required system tools
sudo apt-get update
sudo apt-get -y install apt-transport-https ca-certificates curl software-properties-common
# step 2: install the GPG key
curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -
# step 3: add the repository
sudo add-apt-repository "deb [arch=amd64] https://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
# step 4: update and install containerd
sudo apt-get -y update
sudo apt-get -y install containerd.io

After installation, check the version:

root@master:~# containerd --version
containerd containerd.io 1.6.6 10c12954828e7c7c9b6e0ea9b0c02b01407d3ae1

Generate the containerd configuration file:

root@master:~# containerd config default | sudo tee /etc/containerd/config.toml  # if this errors, create the directory first with mkdir /etc/containerd and rerun

For well-known reasons, images cannot be pulled from Google's registry here, so we need to change the image registry. The config file is long, so only the lines that need changing are shown:

vim /etc/containerd/config.toml
sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.6" # change the registry after sandbox_image from Google's to the Alibaba Cloud mirror
systemd_cgroup = true # change the default false to true, or you may get warnings about the cgroup controller (it must match kubelet's cgroup driver)
runtime_type = "io.containerd.runtime.v1.linux" # without this change, image pulls may fail later

If editing with vim feels tedious, sed can make the same changes directly:

 sed -i "s@systemd_cgroup\ \=\ false@systemd_cgroup\ \=\ true@g" /etc/containerd/config.toml
 sed -i "s@registry.k8s.io@registry.aliyuncs.com/google_containers@g" /etc/containerd/config.toml
# sed -i "s@config_path\ \=\ \"\"@config_path\ \=\ \"/etc/containerd/certs.d\"@g" /etc/containerd/config.toml
# this line only matters if you configure a containerd registry mirror (see below); skip it otherwise
 sed -i "s@runtime_type.*@runtime_type\ \=\ \"io.containerd.runtime.v1.linux\"@g" /etc/containerd/config.toml

If you want a registry mirror for containerd, configure it as follows:

mkdir -p /etc/containerd/certs.d/docker.io  # the final directory name here must match the server entry below
cat > /etc/containerd/certs.d/docker.io/hosts.toml <<EOF
server = "https://docker.io"
[host."https://hub-mirror.c.163.com"]
  capabilities = ["pull", "resolve"]
EOF

With those edits saved, start containerd via systemd and verify that it is healthy:

root@master:~# systemctl daemon-reload
root@master:~# systemctl enable --now containerd && systemctl status containerd

Like Docker, containerd is split into a server side and a client side:

root@master:~# ctr --version
ctr containerd.io 1.6.6
root@master:~# ctr version
Client:
  Version:  1.6.6
  Revision: 10c12954828e7c7c9b6e0ea9b0c02b01407d3ae1
  Go version: go1.17.11
Server:
  Version:  1.6.6
  Revision: 10c12954828e7c7c9b6e0ea9b0c02b01407d3ae1
  UUID: 8b560310-ffe2-45b0-9931-da6708f182af

1.9 Install kubeadm, kubelet, and kubectl

As with containerd, we first configure the Kubernetes apt source; again we use the Alibaba Cloud mirror.

apt-get update && apt-get install -y apt-transport-https
curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -  # import the key
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF 
root@master:~# apt-get update # on Ubuntu, always run update after adding a source
root@master:~# apt-cache madison kubelet  # list the package versions currently available
   kubelet |  1.24.3-00 | https://mirrors.aliyun.com/kubernetes/apt kubernetes-xenial/main amd64 Packages
   kubelet |  1.24.2-00 | https://mirrors.aliyun.com/kubernetes/apt kubernetes-xenial/main amd64 Packages
   kubelet |  1.24.1-00 | https://mirrors.aliyun.com/kubernetes/apt kubernetes-xenial/main amd64 Packages
   kubelet |  1.24.0-00 | https://mirrors.aliyun.com/kubernetes/apt kubernetes-xenial/main amd64 Packages
root@master:~# apt-get install -y kubelet=1.24.2-00 kubeadm=1.24.2-00  kubectl=1.24.2-00  # package=version pins a specific version; omit it to install the latest

From the official documentation:

kubeadm: the command to bootstrap the cluster.

kubelet: the agent that runs on every node in the cluster and starts Pods and containers.

kubectl: the command-line tool for communicating with the cluster.

kubeadm will not install or manage kubelet or kubectl for you, so you must make sure their versions match the version of the control plane that kubeadm installs. Otherwise you risk version skew, which can lead to unexpected errors and problems. That said, one minor version of skew between the control plane and the kubelet is supported, as long as the kubelet version never exceeds the API server's. For example, a 1.7.0 kubelet is fully compatible with a 1.8.0 API server, but not the other way around.
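
Because of this version-skew policy, it is common (though optional) to hold the pinned packages so a routine apt upgrade cannot silently bump them:

# prevent apt upgrades from changing the pinned versions
sudo apt-mark hold kubelet kubeadm kubectl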

1.10 Configure shell completion for kubeadm, kubectl, and crictl

Tab completion does not work out of the box after installing the packages; enable it via the shell profile:

root@master:~# cat >> .bashrc <<EOF
source <(kubeadm completion bash)
source <(kubectl completion bash)
source <(crictl completion bash)
EOF
root@master:~# source .bashrc # after reloading the profile, tab completion works

1.11 Adjust the crictl configuration

A freshly installed containerd has a few rough edges, so we adjust some configuration files:

root@master:~# ll /run/containerd/containerd.sock # locate containerd's socket file
srw-rw---- 1 root root 0 Jul 21 03:51 /run/containerd/containerd.sock=
root@master:~# crictl config runtime-endpoint unix:///run/containerd/containerd.sock  # point crictl at containerd's socket

Then edit crictl's config file:

root@master:~# vim /etc/crictl.yaml
runtime-endpoint: "unix:///run/containerd/containerd.sock"
image-endpoint: "unix:///run/containerd/containerd.sock"
timeout: 10 # the timeout should not be too short; I raised it to 10 seconds here
debug: false
pull-image-on-create: false
disable-pull-on-run: false

Restart the service and check that the command works:

root@master:~# systemctl daemon-reload && systemctl restart containerd
root@master:~# crictl images
IMAGE                                                             TAG                 IMAGE ID            SIZE

2 Install Kubernetes proper (mostly executed on the master node)

2.1 Initialize the cluster with kubeadm

root@master:~# kubeadm config print init-defaults > init.yaml  # dump the default init configuration into a file in the current directory for easy editing
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 10.10.21.105 # change this to the master node's address
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  name: master # must match the master's hostname, or init will fail
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers # switch to the Alibaba Cloud mirror, or the images cannot be pulled
kind: ClusterConfiguration
kubernetesVersion: 1.24.2 # must match the installed version, or you may hit compatibility problems
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.16.0.0/12 # pick any subnet you like; it is assigned to Services later, but make sure it does not overlap existing node IPs
scheduler: {}
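
Optionally, you can pre-pull the control-plane images before running init; this speeds up initialization and surfaces any registry problems early:

# pull all control-plane images referenced by the config up front
kubeadm config images pull --config init.yaml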

2.1.1 Generate a more detailed init file

The init file generated in 2.1 deploys Kubernetes with kube-proxy in the default iptables mode; the following variant exposes more parameters to customize:

kubeadm config print init-defaults --component-configs KubeletConfiguration > kubeadm_init.yaml
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 1.2.3.4
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  name: master
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.24.2 # the dump may show a newer default; set this to the version being installed
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12 # Service subnet
  podSubnet: 10.244.0.0/16 # Pod subnet
scheduler: {}
controlPlaneEndpoint: "example.com:6443"  # change to your own domain
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs # kube-proxy mode
---
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 0s
    cacheUnauthorizedTTL: 0s
cgroupDriver: systemd
clusterDNS:
- 10.96.0.10 # CoreDNS address; must fall inside the Service subnet
clusterDomain: cluster.local
cpuManagerReconcilePeriod: 0s
evictionPressureTransitionPeriod: 0s
fileCheckFrequency: 0s
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 0s
imageMinimumGCAge: 0s
kind: KubeletConfiguration
logging:
  flushFrequency: 0
  options:
    json:
      infoBufferSize: "0"
  verbosity: 0
memorySwap: {}
nodeStatusReportFrequency: 0s
nodeStatusUpdateFrequency: 0s
rotateCertificates: true
runtimeRequestTimeout: 0s
shutdownGracePeriod: 0s
shutdownGracePeriodCriticalPods: 0s
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 0s
syncFrequency: 0s
volumeStatsAggPeriod: 0s

Initialize the cluster with the file we just edited:

root@master:~# kubeadm init --config=init.yaml # if you followed 2.1.1, run kubeadm init --config=kubeadm_init.yaml instead

Output like the following means the initialization succeeded (my cluster was already deployed, so this screenshot was borrowed from elsewhere):

(screenshot: kubeadm init success message)

Follow the printed instructions:

root@master:~# export KUBECONFIG=/etc/kubernetes/admin.conf # run this as root; non-root users run the commands printed above instead. If you scp this file to another node, that node can run kubectl as well
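
For reference, the non-root variant that kubeadm init prints is:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config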

Then run kubeadm join on the worker nodes to add them to the cluster. If you can no longer find the token, regenerate the full join command with:

root@master:~# kubeadm token create --print-join-command 
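
The printed command is ready to run and looks roughly like this (token and hash below are placeholders taken from the example config, not real values); run it on node1 and node2:

kubeadm join 10.10.21.105:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:<hash-printed-by-the-command-above>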

2.2 Install a Kubernetes network plugin

There are many Kubernetes network plugins, e.g. flannel, calico, and others; the differences are easy to look up. I chose calico this time.

root@master:~#  curl https://projectcalico.docs.tigera.io/manifests/calico.yaml -O # download calico's yaml file for editing (calico.yaml is what gets applied below)

(screenshot: the pod network CIDR block in calico.yaml)

This block defines the container network's CIDR; uncomment it and set it to the pod subnet defined earlier (mine is 10.16.0.0/12), making sure it does not overlap the node IP range.

Once edited, apply it with kubectl:

root@master:~# kubectl apply -f calico.yaml 

Now wait for all the containers to be pulled and running normally, and a small Kubernetes cluster is essentially deployed:

root@master:~# kubectl get pod -n kube-system 
NAME                                       READY   STATUS    RESTARTS   AGE
calico-kube-controllers-555bc4b957-s6vqq   1/1     Running   0          23h
calico-kube-controllers-555bc4b957-vfsvh   1/1     Running   0          2d4h
calico-node-cz2mt                          1/1     Running   0          2d5h
calico-node-l66tm                          1/1     Running   0          2d5h
calico-node-qfm46                          1/1     Running   0          2d4h
coredns-74586cf9b6-c758d                   1/1     Running   0          2d8h
coredns-74586cf9b6-g46m2                   1/1     Running   0          2d8h
etcd-master                                1/1     Running   0          2d8h
kube-apiserver-master                      1/1     Running   0          2d8h
kube-controller-manager-master             1/1     Running   0          2d8h
kube-proxy-87fl2                           1/1     Running   0          2d4h
kube-proxy-cfhv2                           1/1     Running   0          2d8h
kube-proxy-x5p4p                           1/1     Running   0          2d4h
kube-scheduler-master                      1/1     Running   0          2d8h
root@master:~# kubectl get node
NAME     STATUS   ROLES           AGE    VERSION
master   Ready    control-plane   2d8h   v1.24.2
node1    Ready    <none>          2d5h   v1.24.2
node2    Ready    <none>          2d5h   v1.24.2

3 WebUI

Frankly, I am not a fan of the official dashboard; it has never felt very usable to me. With older Kubernetes versions I preferred open-source third-party web UIs such as kubesphere or kuboard, but while deploying 1.24.2 I found that none of them support this version yet, so I skipped the web UI this time. If you do want the official dashboard, search GitHub for dashboard and copy its yaml into a local file (downloading it directly is painfully slow), then kubectl apply it locally:

root@master:~# cat recommended.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
---
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
    # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
    # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard
---
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.6.0
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
              # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
    spec:
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.8
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
          - mountPath: /tmp
            name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}
root@master:~# kubectl apply -f  recommended.yaml
# once all the pods are running normally, the dashboard is deployed
root@master:~# kubectl get pods -n kubernetes-dashboard 
NAME                                        READY   STATUS    RESTARTS   AGE
dashboard-metrics-scraper-8c47d4b5d-wbsmx   1/1     Running   0          2d2h
kubernetes-dashboard-5676d8b865-q2mvv       1/1     Running   0          2d2h

By default the dashboard Service only has a cluster IP; to reach it from outside, edit the corresponding Service:

root@master:~# kubectl edit service kubernetes-dashboard -n kubernetes-dashboard  # change spec.type from ClusterIP to NodePort
Edit cancelled, no changes made.
root@master:~# kubectl get service -n kubernetes-dashboard 
NAME                        TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)         AGE
dashboard-metrics-scraper   ClusterIP   10.29.5.10     <none>        8000/TCP        2d2h
kubernetes-dashboard        NodePort    10.25.91.231   <none>        443:30003/TCP   2d2h
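
If you prefer a non-interactive change, kubectl patch does the same thing (a sketch):

# switch the Service type to NodePort in one command
kubectl -n kubernetes-dashboard patch service kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}'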

Create a service account and bind it to the built-in cluster-admin role; after that the dashboard web UI is reachable on the LAN. Before logging in you must create a token manually:

root@master:~# kubectl create serviceaccount dashboard-admin -n kube-system
root@master:~# kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
root@master:~# kubectl create token -n kube-system  dashboard-admin
eyJhbGciOiJSUzI1NiIsImtpZCI6IldtRlpiNmpzOVNxNElNOS1maTY5ZThkUXRyZDNBNy1URUd6N05yM2RiT0kifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNjU4NTk1OTE0LCJpYXQiOjE2NTg1OTIzMTQsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhZG1pbi11c2VyIiwidWlkIjoiYmYzY2VjZjctN2NkYS00NjFkLWE4YjMtMTY0ZWU3Y2RlNTU2In19LCJuYmYiOjE2NTg1OTIzMTQsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDphZG1pbi11c2VyIn0.VtNX1V8gSQ-T3aVVdczKmz41Ul20PDgYypXAe5tLU-Hb_u9qiTw2RvXtDQ2hb-TaZSOCqohba3Ct3YtlNqBkpe1SpNihCb1acdka4Mag6r9lfE43CupcHQjW8REUPq9KG4_XVbCo_ei9LguEwcCySYzzr5Sr9Vf6XLOEfOrFoGKv9c1jsZHKWwnakOi7zlWgihjhcrgQogRynZm4y-MxqQyng-V0mWoi373uj_ZqIu21RSA6tvp8LNZsrzDl3zd2QV4mopnQHnTMQwAsbdTwBz4pBkVAQiP0yEFDPwjiTXDc9GANYWrDVIuqUcUeB1ctPZnAyvo-BIbduS1IrADz9A

The dashboard is only reachable over https. Open the node address and NodePort (here, e.g. https://10.10.21.105:30003), paste the token generated above, and you can log in normally.

The deployment ends here. I hope this post helps you get through your own deployment smoothly; if you find any problems in it, feel free to contact me so I can fix them!
