编译一个nginx做负载均衡
如果不想编译,也可以直接用 yum/apt 安装 nginx,但需要额外安装 stream 相关模块包(见下方 CentOS 的 nginx-all-modules)
CentOS
# CentOS: install nginx plus the extra module package that provides the
# stream (L4 load-balancing) module. The original fused both commands into
# one yum invocation, which tried to install a package literally named "yum".
yum -y install nginx
yum -y install nginx-all-modules.noarch
Ubuntu
# Ubuntu: install nginx from the distro repository. No extra module package
# is installed here (cf. the CentOS step, which adds nginx-all-modules).
apt -y install nginx
- 安装编译需要的软件
CentOS
# CentOS build prerequisites for compiling nginx from source.
# The original used "//" as a comment marker, but "//" is not a shell
# comment — it was passed to yum as a (bogus) package argument.
yum -y install gcc gcc-c++              # C/C++ toolchain
yum -y install pcre pcre-devel          # regex support
yum -y install zlib zlib-devel          # compression library
yum -y install openssl openssl-devel make
Ubuntu
# Ubuntu build prerequisites for compiling nginx from source.
# -y keeps the run non-interactive, matching the yum/apt steps above.
apt-get install -y gcc
apt-get install -y libpcre3 libpcre3-dev
apt-get install -y zlib1g zlib1g-dev
apt-get install -y make
- 下载源码包
# Fetch and unpack the nginx source, then enter the source tree.
# Chained with && so a failed download does not cascade into tar/cd errors.
wget http://nginx.org/download/nginx-1.20.1.tar.gz \
  && tar -xf nginx-1.20.1.tar.gz \
  && cd nginx-1.20.1/
- 编译部署nginx
# Configure a stream-only (TCP proxy) nginx: HTTP is disabled entirely,
# install prefix is /opt (binary ends up in /opt/sbin/nginx).
./configure --prefix=/opt --with-stream --without-http --without-http_uwsgi_module \
  && make \
  && make install
- 编写nginx配置文件
# Generate the stream (TCP) load-balancer config for the kube-apiservers.
# \$remote_addr is escaped so nginx, not this shell, expands it;
# $master1..$master3 come from the environment (/opt/k8s_env.sh).
cat > /opt/nginx.conf <<EOF
worker_processes auto;
events {
    worker_connections 1024;
}
stream {
    upstream backend {
        hash \$remote_addr consistent;
        server $master1:6443 max_fails=3 fail_timeout=30s;
        server $master2:6443 max_fails=3 fail_timeout=30s;
        server $master3:6443 max_fails=3 fail_timeout=30s;
    }
    server {
        listen *:8443;
        proxy_connect_timeout 1s;
        proxy_pass backend;
    }
}
EOF
- 编写nginx的service文件
# Generate the systemd unit for the local apiserver proxy. The unit is
# written to /opt and later copied to /etc/systemd/system on each node.
cat > /opt/kube-nginx.service <<EOF
[Unit]
Description=kube-apiserver nginx proxy
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=forking
ExecStartPre=/usr/local/bin/nginx -c /etc/nginx/nginx.conf -p /etc/nginx -t
ExecStart=/usr/local/bin/nginx -c /etc/nginx/nginx.conf -p /etc/nginx
ExecReload=/usr/local/bin/nginx -c /etc/nginx/nginx.conf -p /etc/nginx -s reload
PrivateTmp=true
Restart=always
RestartSec=5
StartLimitInterval=0
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
- 分发文件并启动nginx
# Push the nginx binary, config and unit file to every node, then start it.
# HOSTS is defined by /opt/k8s_env.sh; expansions are quoted (SC2086/SC2068).
source /opt/k8s_env.sh
for host in "${HOSTS[@]}"; do
  ssh "$host" "mkdir -p /etc/nginx/logs"
  scp /opt/sbin/nginx         "$host":/usr/local/bin/
  scp /opt/nginx.conf         "$host":/etc/nginx/nginx.conf
  scp /opt/kube-nginx.service "$host":/etc/systemd/system/
  ssh "$host" "systemctl daemon-reload"
  ssh "$host" "systemctl enable kube-nginx"
  ssh "$host" "systemctl restart kube-nginx"
done
二进制安装etcd
- 下载二进制包
# Download the etcd release tarball (via the ghproxy mirror) and unpack it.
wget https://ghproxy.com/https://github.com/etcd-io/etcd/releases/download/v3.4.23/etcd-v3.4.23-linux-amd64.tar.gz \
  && tar -xf etcd-v3.4.23-linux-amd64.tar.gz
准备etcd的service文件
# Template systemd unit for etcd. ##NODE_NAME##, ##NODE_IP## and
# ##ETCD_CLUSTERS## are substituted per node by a later sed step.
# Backslashes are doubled so the written file keeps single "\" continuations.
cat <<EOF > etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/local/bin/etcd \\
  --name=##NODE_NAME## \\
  --cert-file=/etc/etcd/ssl/etcd.pem \\
  --key-file=/etc/etcd/ssl/etcd-key.pem \\
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \\
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \\
  --trusted-ca-file=/etc/etcd/ssl/ca.pem \\
  --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \\
  --initial-advertise-peer-urls=https://##NODE_IP##:2380 \\
  --listen-peer-urls=https://##NODE_IP##:2380 \\
  --listen-client-urls=https://##NODE_IP##:2379,http://127.0.0.1:2379 \\
  --advertise-client-urls=https://##NODE_IP##:2379 \\
  --initial-cluster-token=etcd-cluster \\
  --initial-cluster=##ETCD_CLUSTERS## \\
  --initial-cluster-state=new \\
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
准备etcd所需证书
- 下载cfssl工具制作证书
# Fetch the cfssl toolchain. Fixes vs original: the third binary is installed
# under its conventional name cfssl-certinfo (not "cfsslinfo"), and the
# downloaded files get the execute bit, which the raw downloads lack.
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 && mv cfssl_linux-amd64 /usr/local/bin/cfssl
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 && mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 && mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson /usr/local/bin/cfssl-certinfo
- 创建根证书配置
# Create the PKI working dir and the CA signing policy.
# The original used "cd !$" — history expansion is disabled in
# non-interactive shells, so it would not work in a script.
mkdir -p pki
cd pki
# "etcd" profile: 100-year expiry, usable for both server and client auth.
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "etcd": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "876000h"
      }
    }
  }
}
EOF
- 创建ca证书请求文件
# CA certificate signing request (self-signed root for the etcd cluster).
cat > ca-csr.json <<EOF
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "HuBei",
      "L": "WuHan",
      "O": "etcd",
      "OU": "etcd"
    }
  ],
  "ca": {
    "expiry": "876000h"
  }
}
EOF
- 创建etcd证书请求文件
# etcd certificate request. The SAN list must contain every address clients
# and peers will dial: localhost plus each master's IP ($master1..$master3
# are expanded from the environment when the heredoc is written).
cat <<EOF > etcd-csr.json
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "$master1",
    "$master2",
    "$master3"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "HuBei",
      "L": "WuHan",
      "O": "etcd",
      "OU": "etcd"
    }
  ]
}
EOF
- 初始化ca证书,利用生成的根证书生成etcd证书文件
# Initialize the self-signed CA, then issue the etcd cert with it
# (produces ca.pem/ca-key.pem and etcd.pem/etcd-key.pem).
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
  -profile=etcd etcd-csr.json | cfssljson -bare etcd
分发证书、二进制文件及service文件
# Distribute certs, per-node unit files and etcd binaries, then start etcd.
source /opt/k8s_env.sh
# First pass: certificates and data/ssl directories on every master.
# mkdir -p keeps the step idempotent (plain mkdir fails on rerun).
for host in "${MASTERS[@]}"; do
  ssh "$host" "mkdir -p /etc/etcd/ssl /var/lib/etcd"
  scp /opt/pki/etcd* "$host":/etc/etcd/ssl/
  scp /opt/pki/ca*   "$host":/etc/etcd/ssl/
done
# Second pass: render the unit template per node and start the service.
for host in "${MASTERS[@]}"; do
  IP=${!host}   # indirect expansion: $master1 -> its IP (replaces eval echo)
  sed -e "s@##NODE_NAME##@$host@g" \
      -e "s@##NODE_IP##@$IP@g" \
      -e "s@##ETCD_CLUSTERS##@$ETCD_CLUSTERS@g" \
      etcd.service > "etcd.service.$host"
  scp "etcd.service.$host" "$host":/etc/systemd/system/etcd.service
  scp /opt/etcd-v3.4.23-linux-amd64/etcd* "$host":/usr/local/bin/
  ssh "$host" "systemctl daemon-reload"
  ssh "$host" "systemctl enable etcd"
  ssh "$host" "systemctl restart etcd"
done
检查etcd是否正常
# Verify cluster health over TLS. The original pasted the raw output table
# into the script, which is not valid shell — it is kept below as comments.
ETCDCTL_API=3 etcdctl --endpoints="$ETCD_ENDPOINTS" \
  --cacert=/etc/etcd/ssl/ca.pem \
  --cert=/etc/etcd/ssl/etcd.pem \
  --key=/etc/etcd/ssl/etcd-key.pem \
  endpoint status --write-out=table
# Expected: every endpoint listed, exactly one IS LEADER=true, matching
# RAFT TERM/INDEX across members, empty ERRORS column. Example:
# | ENDPOINT                  | ID               | VERSION | DB SIZE | IS LEADER | ... | ERRORS |
# | https://10.10.21.232:2379 | 60e9bfc3d268a8a9 | 3.4.23  | 20 kB   | true      | ... |        |
# | https://10.10.21.233:2379 | c3f9a83d12a6d10d | 3.4.23  | 20 kB   | false     | ... |        |
# | https://10.10.21.234:2379 | 1d0903088fe311d1 | 3.4.23  | 20 kB   | false     | ... |        |
配置containerd
下载 crictl 二进制文件并生成 /etc/crictl.yaml 配置文件
# Download crictl and write its client config pointing at containerd's socket.
wget https://ghproxy.com/https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.26.0/crictl-v1.26.0-linux-amd64.tar.gz
tar -xf crictl-v1.26.0-linux-amd64.tar.gz
cat > /etc/crictl.yaml <<EOF
runtime-endpoint: "unix:///run/containerd/containerd.sock"
image-endpoint: "unix:///run/containerd/containerd.sock"
timeout: 10 #超时时间不宜过短,我这里修改成10秒了
debug: false
pull-image-on-create: false
disable-pull-on-run: false
EOF
生成并修改 containerd 配置,分发 crictl 二进制文件及配置文件
# Generate and patch containerd's config on every worker, ship crictl, restart.
for host in "${WORKS[@]}"; do
  # /etc/containerd may not exist yet on a fresh node; tee would fail without it.
  ssh "$host" "mkdir -p /etc/containerd"
  ssh "$host" "containerd config default | tee /etc/containerd/config.toml"
  # Use the systemd cgroup driver (matches the kubelet's cgroupDriver: systemd).
  ssh "$host" 'sed -i "s@systemd_cgroup\ \=\ false@systemd_cgroup\ \=\ true@g" /etc/containerd/config.toml'
  # Pull pause/system images from the Aliyun mirror instead of registry.k8s.io.
  ssh "$host" 'sed -i "s@registry.k8s.io@registry.aliyuncs.com/google_containers@g" /etc/containerd/config.toml'
  # NOTE(review): forcing runtime v1 pairs with the systemd_cgroup flag above,
  # but io.containerd.runtime.v1.linux is removed in newer containerd releases
  # — confirm against the containerd version actually deployed.
  ssh "$host" 'sed -i "s@runtime_type.*@runtime_type\ \=\ \"io.containerd.runtime.v1.linux\"@g" /etc/containerd/config.toml'
  scp crictl "$host":/usr/local/bin/crictl
  scp /etc/crictl.yaml "$host":/etc/crictl.yaml
  ssh "$host" "systemctl daemon-reload"
  ssh "$host" "systemctl enable containerd"
  ssh "$host" "systemctl restart containerd"
done
kubeadm初始化集群
编写init文件
# Generate the kubeadm init config (v1beta3): InitConfiguration +
# ClusterConfiguration (external etcd, local LB endpoint) + kube-proxy (ipvs)
# + KubeletConfiguration. ##PLACEHOLDERS## are filled in by the sed step that
# follows; $master1..3 and $(hostname) expand when the heredoc is written.
# NOTE(review): resolvConf points at systemd-resolved's stub — valid on
# Ubuntu; on CentOS this path may not exist, verify per-distro.
cat <<EOF > kubeadm_init.yaml
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: ##master1##
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  name: $(hostname)
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  external: #这里默认是local,由于我们用的外部etcd,所以需要修改
    endpoints:
    - https://$master1:2379
    - https://$master2:2379
    - https://$master3:2379
    #搭建etcd集群时生成的ca证书
    caFile: /etc/etcd/ssl/ca.pem
    #搭建etcd集群时生成的客户端证书
    certFile: /etc/etcd/ssl/etcd.pem
    #搭建etcd集群时生成的客户端密钥
    keyFile: /etc/etcd/ssl/etcd-key.pem
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.26.0
controlPlaneEndpoint: 127.0.0.1:8443 # 负载均衡的IP+Port
networking:
  dnsDomain: cluster.local
  serviceSubnet: ##SERVICE_CIDR##
  podSubnet: ##CLUSTER_CIDR##
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs #kube-proxy模式
---
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 0s
    cacheUnauthorizedTTL: 0s
cgroupDriver: systemd
clusterDNS:
- ##CLUSTER_KUBERNETES_SVC_IP##
clusterDomain: cluster.local
cpuManagerReconcilePeriod: 0s
evictionPressureTransitionPeriod: 0s
fileCheckFrequency: 0s
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 0s
imageMinimumGCAge: 0s
kind: KubeletConfiguration
logging:
  flushFrequency: 0
  options:
    json:
      infoBufferSize: "0"
  verbosity: 0
memorySwap: {}
nodeStatusReportFrequency: 0s
nodeStatusUpdateFrequency: 0s
resolvConf: /run/systemd/resolve/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 0s
shutdownGracePeriod: 0s
shutdownGracePeriodCriticalPods: 0s
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 0s
syncFrequency: 0s
volumeStatsAggPeriod: 0s
EOF
开始初始化
# Fill in the placeholders from the environment, then bootstrap the cluster.
sed -i "s@##master1##@$master1@g" kubeadm_init.yaml
sed -i "s@##SERVICE_CIDR##@$SERVICE_CIDR@g" kubeadm_init.yaml
sed -i "s@##CLUSTER_CIDR##@$CLUSTER_CIDR@g" kubeadm_init.yaml
sed -i "s@##CLUSTER_KUBERNETES_SVC_IP##@$CLUSTER_KUBERNETES_SVC_IP@g" kubeadm_init.yaml
kubeadm init --config=kubeadm_init.yaml
配置kubeconfig
# Make kubectl usable for the current user (standard post-kubeadm-init step).
mkdir -p "$HOME/.kube"
sudo cp -i /etc/kubernetes/admin.conf "$HOME/.kube/config"
sudo chown "$(id -u):$(id -g)" "$HOME/.kube/config"
安装网络插件
# Generate the flannel CNI manifest (namespace, RBAC, ConfigMap, DaemonSet).
# Fixed vs original: '>' instead of '>>' — appending meant a rerun duplicated
# every document in flannel.yaml. The heredoc is unquoted so $CLUSTER_CIDR
# expands into net-conf.json as the pod network.
cat <<EOF > flannel.yaml
---
kind: Namespace
apiVersion: v1
metadata:
  name: kube-flannel
  labels:
    pod-security.kubernetes.io/enforce: privileged
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-flannel
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "$CLUSTER_CIDR",
      "Backend": {
        "Type": "vxlan"
      }
    }
  #POD对应的CIDR
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni-plugin
        #image: flannelcni/flannel-cni-plugin:v1.1.0 for ppc64le and mips64le (dockerhub limitations may apply)
        image: docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
        command:
        - cp
        args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        volumeMounts:
        - name: cni-plugin
          mountPath: /opt/cni/bin
      - name: install-cni
        #image: flannelcni/flannel:v0.19.2 for ppc64le and mips64le (dockerhub limitations may apply)
        image: docker.io/rancher/mirrored-flannelcni-flannel:v0.19.2
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        #image: flannelcni/flannel:v0.19.2 for ppc64le and mips64le (dockerhub limitations may apply)
        image: docker.io/rancher/mirrored-flannelcni-flannel:v0.19.2
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: EVENT_QUEUE_DEPTH
          value: "5000"
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
        - name: xtables-lock
          mountPath: /run/xtables.lock
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni-plugin
        hostPath:
          path: /opt/cni/bin
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
EOF
# Deploy flannel from the manifest generated in the previous step.
kubectl apply -f flannel.yaml
检查是否成功
# Verify flannel rolled out; pasted example output kept as comments
# (raw command output is not valid shell).
kubectl get pod -n kube-flannel
# NAME                    READY   STATUS    RESTARTS   AGE
# kube-flannel-ds-bhtq4   1/1     Running   0          5m26s
kubectl get node
# The node switches to Ready once the CNI is up:
# NAME   STATUS   ROLES           AGE   VERSION
# node   Ready    control-plane   56m   v1.26.0
添加节点
添加Worker节点
生成token
# Print a ready-to-run 'kubeadm join' command containing a fresh bootstrap token.
kubeadm token create --print-join-command
按提示到其他工作节点进行输入
# Join each worker through the local nginx LB endpoint (127.0.0.1:8443).
# Token/hash come from the 'kubeadm token create --print-join-command' output.
for host in node1 node2; do
  ssh "$host" "kubeadm join 127.0.0.1:8443 --token phyh1z.9nbkxuc2rjwl6lhl --discovery-token-ca-cert-hash sha256:93b8abf312eb1ecec18a3c0317a18bf494b45aafba5f368f2a67489ea4360b7e "
done
检查是否成功
# Confirm both workers registered; example output kept as comments.
kubectl get node
# NAME      STATUS   ROLES           AGE    VERSION
# master1   Ready    control-plane   1h     v1.26.0
# node1     Ready    <none>          41s    v1.26.0
# node2     Ready    <none>          119s   v1.26.0
添加控制节点
更新证书
# Re-upload the control-plane certificates and print the certificate key
# needed by 'kubeadm join --control-plane'. Example output kept as comments.
kubeadm init phase upload-certs --upload-certs --config kubeadm_init.yaml
# W1228 23:56:43.166317 98389 utils.go:69] The recommended value for "clusterDNS" ...
# [upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
# [upload-certs] Using certificate key: e55be7a28936f39f26327678a274209049fba18f3d1f4b010570fb439168a844
将之前生成的token和这次生成的key拼接起来即可
# Join the remaining control-plane nodes: the worker join command plus
# --control-plane and the certificate key printed by upload-certs.
for host in master2 master3; do
  ssh "$host" "kubeadm join 127.0.0.1:8443 --token phyh1z.9nbkxuc2rjwl6lhl --discovery-token-ca-cert-hash sha256:93b8abf312eb1ecec18a3c0317a18bf494b45aafba5f368f2a67489ea4360b7e --control-plane --certificate-key e55be7a28936f39f26327678a274209049fba18f3d1f4b010570fb439168a844"
done
检查是否成功
# Confirm all three control-plane nodes joined; example output as comments.
kubectl get node
# NAME      STATUS   ROLES           AGE     VERSION
# master2   Ready    control-plane   6m39s   v1.26.0
# master3   Ready    control-plane   4m55s   v1.26.0
# master1   Ready    control-plane   1h      v1.26.0
# node1     Ready    <none>          16m     v1.26.0
# node2     Ready    <none>          17m     v1.26.0
修改配置文件
- 管理节点修改两个文件,工作节点只需要修改kubelet.conf
cd /etc/kubernetes
# Change the 'server:' address in admin.conf and kubelet.conf to 127.0.0.1
# so API traffic goes through the local kube-nginx load balancer.
vi /etc/kubernetes/admin.conf
vi /etc/kubernetes/kubelet.conf
# Refresh the copy kubectl actually uses.
cp /etc/kubernetes/admin.conf ~/.kube/config
- 重启kubelet
# Restart kubelet on every node and verify it is active.
# Bug fix: the original loop variable was $host but the body used an
# undefined $i, so every ssh ran against an empty hostname.
source /opt/k8s_env.sh
for host in "${HOSTS[@]}"; do
  ssh "$host" systemctl restart kubelet
  ssh "$host" systemctl is-active kubelet
done
删除旧的证书,生成新证书
cd /etc/kubernetes/pki
# Back up the old apiserver cert pair first.
mv apiserver.key apiserver.key.bak
mv apiserver.crt apiserver.crt.bak
# Run this once on EACH of the three masters, substituting that node's own IP
# for 当前节点IP. --apiserver-cert-extra-sans adds 127.0.0.1 (the local LB
# address) to the cert SANs so later node joins pass certificate validation.
kubeadm init phase certs apiserver --apiserver-advertise-address 当前节点IP --apiserver-cert-extra-sans "127.0.0.1,192.168.0.1"