7. Install the Kubernetes components
(Run on all three VMs)
# Configure the Kubernetes yum repository
[root@master ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
> [kubernetes]
> name=Kubernetes
> baseurl=https://repo.huaweicloud.com/kubernetes/yum/repos/kubernetes-el7-x86_64
> enabled=1
> gpgcheck=1
> repo_gpgcheck=0
> gpgkey=https://repo.huaweicloud.com/kubernetes/yum/doc/yum-key.gpg https://repo.huaweicloud.com/kubernetes/yum/doc/rpm-package-key.gpg
> EOF
[root@master ~]# cat /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://repo.huaweicloud.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=0
gpgkey=https://repo.huaweicloud.com/kubernetes/yum/doc/yum-key.gpg https://repo.huaweicloud.com/kubernetes/yum/doc/rpm-package-key.gpg

# Rebuild the yum metadata cache
[root@master ~]# yum clean all
Loaded plugins: fastestmirror
Cleaning repos: base docker-ce-stable extras kubernetes updates
Cleaning up list of fastest mirrors
[root@master ~]# yum makecache fast
Loaded plugins: fastestmirror
Determining fastest mirrors
 * base: mirrors.ustc.edu.cn
 * extras: mirrors.ustc.edu.cn
 * updates: mirrors.ustc.edu.cn
base                                          | 3.6 kB  00:00:00
docker-ce-stable                              | 3.5 kB  00:00:00
extras                                        | 2.9 kB  00:00:00
kubernetes                                    | 1.4 kB  00:00:00
updates                                       | 2.9 kB  00:00:00
(1/7): base/7/x86_64/group_gz                 | 153 kB  00:00:00
(2/7): docker-ce-stable/7/x86_64/updateinfo   |  55 B   00:00:00
(3/7): extras/7/x86_64/primary_db             | 249 kB  00:00:00
(4/7): docker-ce-stable/7/x86_64/primary_db   |  87 kB  00:00:00
(5/7): base/7/x86_64/primary_db               | 6.1 MB  00:00:01
(6/7): kubernetes/primary                     | 102 kB  00:00:02
(7/7): updates/7/x86_64/primary_db            |  17 MB  00:00:03
kubernetes                                      751/751
Metadata cache created

# Install kubeadm-1.18.1 kubelet-1.18.1 kubectl-1.18.1
[root@node1 ~]# yum install kubeadm-1.18.1 kubelet-1.18.1 kubectl-1.18.1 -y
# kubeadm: the command used to bootstrap the cluster.
# kubelet: runs on every node in the cluster; starts Pods and containers.
# kubectl: the command-line tool used to talk to the cluster.

# Enable kubelet at boot (enabling it is enough; there is no need to start it now, because it will be started automatically when the cluster is brought up later)
[root@master ~]# systemctl enable kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
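As a quick sanity check (our own addition, not part of the original transcript), each of the three components can report its version; all three should show v1.18.1:

# Optional check: confirm the installed versions match
kubeadm version -o short             # expect v1.18.1
kubelet --version                    # expect Kubernetes v1.18.1
kubectl version --client --short     # expect Client Version: v1.18.1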
8. Prepare the cluster images
(Run on all three VMs)
# Before installing Kubernetes, the required images must be prepared; they can be listed with the command below.
# (By default, cluster initialization pulls images from the official registry, which is slow, so we fetch them in advance.)
[root@master ~]# kubeadm config images list
I1109 09:24:29.717650    8643 version.go:252] remote version is much newer: v1.25.3; falling back to: stable-1.18
W1109 09:24:30.774201    8643 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
k8s.gcr.io/kube-apiserver:v1.18.1
k8s.gcr.io/kube-controller-manager:v1.18.1
k8s.gcr.io/kube-scheduler:v1.18.1
k8s.gcr.io/kube-proxy:v1.18.1
k8s.gcr.io/pause:3.2
k8s.gcr.io/etcd:3.4.3-0
k8s.gcr.io/coredns:1.6.7

# Define the list of images to download
images=(
    kube-apiserver:v1.18.1
    kube-controller-manager:v1.18.1
    kube-scheduler:v1.18.1
    kube-proxy:v1.18.1
    pause:3.2
    etcd:3.4.3-0
    coredns:1.6.7
)
[root@master ~]# images=(
>     kube-apiserver:v1.18.1
>     kube-controller-manager:v1.18.1
>     kube-scheduler:v1.18.1
>     kube-proxy:v1.18.1
>     pause:3.2
>     etcd:3.4.3-0
>     coredns:1.6.7
> )

# Loop over the list: pull each image from the Aliyun mirror, retag it as k8s.gcr.io, then remove the mirror tag
for imageName in ${images[@]} ; do
    docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName
    docker tag  registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName k8s.gcr.io/$imageName
    docker rmi  registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName
done
[root@master ~]# for imageName in ${images[@]} ; do
>     docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName
>     docker tag  registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName k8s.gcr.io/$imageName
>     docker rmi  registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName
> done

# After the pulls complete, list the images Docker has fetched
[root@master ~]# docker images
REPOSITORY                           TAG       IMAGE ID       CREATED       SIZE
k8s.gcr.io/kube-proxy                v1.18.1   4e68534e24f6   2 years ago   117MB
k8s.gcr.io/kube-apiserver            v1.18.1   a595af0107f9   2 years ago   173MB
k8s.gcr.io/kube-controller-manager   v1.18.1   d1ccdd18e6ed   2 years ago   162MB
k8s.gcr.io/kube-scheduler            v1.18.1   6c9320041a7b   2 years ago   95.3MB
k8s.gcr.io/pause                     3.2       80d28bedfe5d   2 years ago   683kB
k8s.gcr.io/coredns                   1.6.7     67da37a9a360   2 years ago   43.8MB
k8s.gcr.io/etcd                      3.4.3-0   303ce5db0e90   3 years ago   288MB
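Before moving on to initialization, a short loop (our own addition, not from the original walkthrough) can confirm that every retagged image is actually present under its k8s.gcr.io name:

# Optional sanity check: verify every image kubeadm expects exists locally
for imageName in ${images[@]} ; do
    if docker image inspect k8s.gcr.io/$imageName > /dev/null 2>&1 ; then
        echo "OK      k8s.gcr.io/$imageName"
    else
        echo "MISSING k8s.gcr.io/$imageName"
    fi
done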
9. Initialize the cluster
(Run on the master node only)
kubeadm init \
--kubernetes-version=v1.18.1 \                 # Kubernetes version
--pod-network-cidr=10.244.0.0/16 \             # Pod network CIDR
--service-cidr=10.96.0.0/12 \                  # Service network CIDR
--apiserver-advertise-address=192.168.30.136   # master node IP address

[root@master ~]# kubeadm init \
> --kubernetes-version=v1.18.1 \
> --pod-network-cidr=10.244.0.0/16 \
> --service-cidr=10.96.0.0/12 \
> --apiserver-advertise-address=192.168.30.136
W1109 09:43:30.134252    9070 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
[init] Using Kubernetes version: v1.18.1
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.30.136]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [master localhost] and IPs [192.168.30.136 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [master localhost] and IPs [192.168.30.136 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
W1109 09:43:32.987637    9070 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[control-plane] Creating static Pod manifest for "kube-scheduler"
W1109 09:43:32.988774    9070 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 14.502578 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.18" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node master as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: fdulmj.ur63oouawk7chggv
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!
# Seeing the line above means the installation succeeded.

To start using your cluster, you need to run the following as a regular user:
# These three commands must be run to start using the cluster (done below on the master):

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
# The Pod network is deployed with the command referenced here:
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:
# To add worker nodes, run the command below as root on each node:

kubeadm join 192.168.30.136:6443 --token fdulmj.ur63oouawk7chggv \
    --discovery-token-ca-cert-hash sha256:e5cdfc22feb9f6fdbae51140e216fb18a83c37a0aa6f6f14656920ea70946513

# Copy the kubeconfig so kubectl works on the master
[root@master ~]# mkdir -p $HOME/.kube
[root@master ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

# Join the two node machines to the cluster
[root@node ~]# kubeadm join 192.168.30.136:6443 --token fdulmj.ur63oouawk7chggv \
>     --discovery-token-ca-cert-hash sha256:e5cdfc22feb9f6fdbae51140e216fb18a83c37a0aa6f6f14656920ea70946513
W1109 10:47:05.586948    9099 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.18" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
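One practical note (not in the original walkthrough): the bootstrap token printed by kubeadm init expires after 24 hours by default, so if another node needs to join later, a fresh join command can be generated on the master with the standard kubeadm subcommand:

# If the original token has expired, print a new join command on the master
[root@master ~]# kubeadm token list                          # inspect existing tokens
[root@master ~]# kubeadm token create --print-join-command   # emit a ready-to-run kubeadm join line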
# After the join finishes, run kubectl get nodes on the master to check whether the nodes joined successfully
[root@master ~]# kubectl get nodes
NAME     STATUS     ROLES    AGE   VERSION
master   NotReady   master   63m   v1.18.1
node     NotReady   <none>   12s   v1.18.1
node1    NotReady   <none>   2s    v1.18.1
# The status is NotReady because no network plugin has been installed yet.
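To bring the nodes to Ready, a CNI network add-on has to be installed next. A common choice whose default Pod CIDR matches the --pod-network-cidr=10.244.0.0/16 used above is flannel; the manifest URL below is the upstream location and may change over time, so treat this as an illustrative sketch rather than part of the original session:

# Example: install flannel as the Pod network (verify the manifest URL before use)
[root@master ~]# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# Wait for the flannel pods to start, then re-check the nodes; they should turn Ready
[root@master ~]# kubectl get pods -n kube-system
[root@master ~]# kubectl get nodes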