1、系统配置
系统信息
[root@master ~]# cat /etc/redhat-release
CentOS Linux release 7.4.1708 (Core)
集群环境
10.10.9.11 master
10.10.9.12 node1
10.10.9.13 node2
第一部分:安装前环境设置,所有节点执行
设置主机名,三台设备分别执行(以 master 为例,node1/node2 相应替换)
hostnamectl set-hostname master
hostnamectl set-hostname node1
hostnamectl set-hostname node2
设置 net.bridge
[root@master ~]# cat /etc/sysctl.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
[root@master ~]# sysctl -p
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
禁用Selinux
[root@master network]# sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
[root@master network]# setenforce 0
setenforce: SELinux is disabled
禁用firewalld
systemctl disable firewalld && systemctl stop firewalld
禁用 swap
swapoff -a
sed -i -e /swap/d /etc/fstab
2、资源下载
yum 源设置,由于 1.9.2 版本国内无法直接 yum 安装,这里下载到本地
链接: https://pan.baidu.com/s/1mjiB5Cc 密码: 1vgc
3、安装软件
docker 安装
下面是已经下载好的软件包,从百度云盘上下载后,解压直接安装
[root@master rpms]# ls
docker-compose.tar.gz docker.tar.gz k8s.tar.gz
[root@master rpms]#
tar -zxvf docker-compose.tar.gz
tar -zxvf docker.tar.gz
tar -zxvf k8s.tar.gz
chmod a+x docker-compose
cp docker-compose /usr/local/bin/
yum localinstall docker/* -y
yum localinstall k8s/* -y
设置存储方式
echo DOCKER_STORAGE_OPTIONS=\" -s overlay --selinux-enabled=false\" > /etc/sysconfig/docker-storage
docker 服务启动
systemctl enable docker && systemctl start docker && docker info
kubelet 服务启动
systemctl enable kubelet.service && systemctl start kubelet.service
4、在master 节点上操作
[root@master images]# ls *
etcd-amd64.tar k8s-dns-dnsmasq-nanny-amd64.tar k8s-dns-sidecar-amd64.tar kube-controller-manager-amd64.tar kubernetes-dashboard.tar pause-amd64.tar
flannel.tar k8s-dns-kube-dns-amd64.tar kube-apiserver-amd64.tar kube-proxy-amd64.tar kube-scheduler-amd64.tar
[root@master images]# for i in `ls *`;do docker load --input $i;done
[root@master images]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
gcr.io/google_containers/kube-controller-manager-amd64 v1.9.2 769d889083b6 2 weeks ago 137.8 MB
gcr.io/google_containers/kube-proxy-amd64 v1.9.2 e6754bb0a529 2 weeks ago 109.1 MB
gcr.io/google_containers/kube-apiserver-amd64 v1.9.2 7109112be2c7 2 weeks ago 210.4 MB
gcr.io/google_containers/kube-scheduler-amd64 v1.9.2 2bf081517538 2 weeks ago 62.71 MB
k8s.gcr.io/kubernetes-dashboard-amd64 v1.8.2 c87ea0497294 3 weeks ago 102.3 MB
gcr.io/google_containers/etcd-amd64 3.1.11 59d36f27cceb 9 weeks ago 193.9 MB
quay.io/coreos/flannel v0.9.1-amd64 2b736d06ca4c 11 weeks ago 51.31 MB
gcr.io/google_containers/k8s-dns-sidecar-amd64 1.14.7 db76ee297b85 3 months ago 42.03 MB
gcr.io/google_containers/k8s-dns-kube-dns-amd64 1.14.7 5d049a8c4eec 3 months ago 50.27 MB
gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64 1.14.7 5feec37454f4 3 months ago 40.95 MB
gcr.io/google_containers/pause-amd64 3.0 99e59f495ffa 21 months ago 746.9 kB
[root@master images]#
用kubeadm 初始化集群
[root@master ~]# kubeadm init --kubernetes-version=v1.9.2 --pod-network-cidr=10.96.0.0/12
[init] Using Kubernetes version: v1.9.2
[init] Using Authorization modes: [Node RBAC]
[preflight] Running pre-flight checks.
[WARNING Hostname]: hostname "master" could not be reached
[WARNING Hostname]: hostname "master" lookup master on 114.114.114.114:53: no such host
[WARNING FileExisting-crictl]: crictl not found in system path
[certificates] Generated ca certificate and key.
[certificates] Generated apiserver certificate and key.
[certificates] apiserver serving cert is signed for DNS names [master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.10.9.11]
[certificates] Generated apiserver-kubelet-client certificate and key.
[certificates] Generated sa key and public key.
[certificates] Generated front-proxy-ca certificate and key.
[certificates] Generated front-proxy-client certificate and key.
[certificates] Valid certificates and keys now exist in "/etc/kubernetes/pki"
[kubeconfig] Wrote KubeConfig file to disk: "admin.conf"
[kubeconfig] Wrote KubeConfig file to disk: "kubelet.conf"
[kubeconfig] Wrote KubeConfig file to disk: "controller-manager.conf"
[kubeconfig] Wrote KubeConfig file to disk: "scheduler.conf"
[controlplane] Wrote Static Pod manifest for component kube-apiserver to "/etc/kubernetes/manifests/kube-apiserver.yaml"
[controlplane] Wrote Static Pod manifest for component kube-controller-manager to "/etc/kubernetes/manifests/kube-controller-manager.yaml"
[controlplane] Wrote Static Pod manifest for component kube-scheduler to "/etc/kubernetes/manifests/kube-scheduler.yaml"
[etcd] Wrote Static Pod manifest for a local etcd instance to "/etc/kubernetes/manifests/etcd.yaml"
[init] Waiting for the kubelet to boot up the control plane as Static Pods from directory "/etc/kubernetes/manifests".
[init] This might take a minute or longer if the control plane images have to be pulled.
[apiclient] All control plane components are healthy after 28.003003 seconds
[uploadconfig] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[markmaster] Will mark node master as master by adding a label and a taint
[markmaster] Master master tainted and labelled with key/value: node-role.kubernetes.io/master=""
[bootstraptoken] Using token: db182b.a8ffd6b5a96be72c
[bootstraptoken] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstraptoken] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstraptoken] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstraptoken] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: kube-dns
[addons] Applied essential addon: kube-proxy
Your Kubernetes master has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of machines by running the following on each node
as root:
kubeadm join --token db182b.a8ffd6b5a96be72c 10.10.9.11:6443 --discovery-token-ca-cert-hash sha256:9bbe0d28f25df136bab65ee0fbf98d538cb61d64582460f9c539e1e96106ddba
设置环境变量
[root@master ~]# echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> /etc/profile
[root@master ~]# source /etc/profile
查看
kubectl get pods --all-namespaces
[root@master ~]# kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system etcd-master 1/1 Running 0 2m
kube-system kube-apiserver-master 1/1 Running 0 2m
kube-system kube-controller-manager-master 1/1 Running 0 2m
kube-system kube-dns-6f4fd4bdf-57dbk 0/3 Pending 0 3m
kube-system kube-proxy-sx6wc 1/1 Running 0 3m
kube-system kube-scheduler-master 1/1 Running 0 2m
[root@master ~]#
安装网络
[root@master network]# kubectl apply -f kube-flannel-rbac.yml
clusterrole "flannel" created
clusterrolebinding "flannel" created
[root@master network]# kubectl apply -f kube-flannel.yml
serviceaccount "flannel" created
configmap "kube-flannel-cfg" created
daemonset "kube-flannel-ds" created
安装dashboard
[root@master network]# kubectl create -f kubernetes-dashboard.yml
serviceaccount "kubernetes-dashboard" created
clusterrolebinding "kubernetes-dashboard" created
deployment "kubernetes-dashboard" created
service "kubernetes-dashboard" created
5、在两个节点上执行
导入镜像
[root@node1 images]# docker load --input pause-amd64.tar
[root@node1 images]# docker load --input kube-proxy-amd64.tar
Loaded image: gcr.io/google_containers/kube-proxy-amd64:v1.9.2
[root@node1 images]# docker load --input flannel.tar
[root@node1 images]# docker load --input kubernetes-dashboard.tar
加入集群
[root@node1 images]# kubeadm join --token db182b.a8ffd6b5a96be72c 10.10.9.11:6443 --discovery-token-ca-cert-hash sha256:9bbe0d28f25df136bab65ee0fbf98d538cb61d64582460f9c539e1e96106ddba
[preflight] Running pre-flight checks.
[WARNING Hostname]: hostname "node1" could not be reached
[WARNING Hostname]: hostname "node1" lookup node1 on 114.114.114.114:53: no such host
[WARNING FileExisting-crictl]: crictl not found in system path
[discovery] Trying to connect to API Server "10.10.9.11:6443"
[discovery] Created cluster-info discovery client, requesting info from "https://10.10.9.11:6443"
[discovery] Requesting info from "https://10.10.9.11:6443" again to validate TLS against the pinned public key
[discovery] Cluster info signature and contents are valid and TLS certificate validates against pinned roots, will use API Server "10.10.9.11:6443"
[discovery] Successfully established connection with API Server "10.10.9.11:6443"
This node has joined the cluster:
* Certificate signing request was sent to master and a response
was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the master to see this node join the cluster.
[root@node1 images]#
6、查看
在master 上检查服务
[root@master network]# kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system etcd-master 1/1 Running 0 6m
kube-system kube-apiserver-master 1/1 Running 0 6m
kube-system kube-controller-manager-master 1/1 Running 0 6m
kube-system kube-dns-6f4fd4bdf-57dbk 3/3 Running 0 7m
kube-system kube-flannel-ds-gw8d4 2/2 Running 0 2m
kube-system kube-flannel-ds-qqr2x 2/2 Running 1 1m
kube-system kube-flannel-ds-rjgq8 2/2 Running 1 1m
kube-system kube-proxy-4jxlk 1/1 Running 0 1m
kube-system kube-proxy-k98g5 1/1 Running 0 1m
kube-system kube-proxy-sx6wc 1/1 Running 0 7m
kube-system kube-scheduler-master 1/1 Running 0 6m
kube-system kubernetes-dashboard-554b4c5c69-frbgs 1/1 Running 0 2m
查看节点状态
[root@master network]# kubectl get nodes
NAME      STATUS    ROLES     AGE       VERSION
master    Ready     master    7m        v1.9.2
node1     Ready     <none>    1m        v1.9.2
node2     Ready     <none>    1m        v1.9.2
查看端口服务
[root@master network]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 9m
[root@master network]# kubectl get svc --all-namespaces
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
default kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 9m
kube-system kube-dns ClusterIP 10.96.0.10 <none> 53/UDP,53/TCP 9m
kube-system kubernetes-dashboard NodePort 10.106.134.6 <none> 80:31234/TCP 3m
登录 dashboard
