K8S Binary Deployment - 4

Summary: K8S Binary Deployment

9. Deploying the master02 Node


//Copy the certificate files, the configuration files of each master component, and the systemd service files from the master01 node to the master02 node
scp -r /opt/etcd/ root@192.168.147.101:/opt/
scp -r /opt/kubernetes/ root@192.168.147.101:/opt
scp -r /root/.kube root@192.168.147.101:/root
scp /usr/lib/systemd/system/{kube-apiserver,kube-controller-manager,kube-scheduler}.service root@192.168.147.101:/usr/lib/systemd/system/
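A quick sanity check on master02 (an optional step, not part of the original procedure) can confirm that everything arrived:
//(optional) on master02, verify the copied files are in place
ls /opt/etcd/ /opt/kubernetes/cfg/ /root/.kube/
ls /usr/lib/systemd/system/ | grep -E 'kube-(apiserver|controller-manager|scheduler)'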
//Modify the IP addresses in the kube-apiserver configuration file
vim /opt/kubernetes/cfg/kube-apiserver
KUBE_APISERVER_OPTS="--logtostderr=true \
--v=2 \
--etcd-servers=https://192.168.10.80:2379,https://192.168.10.18:2379,https://192.168.10.19:2379 \
--bind-address=192.168.147.101 \        #change to master02's IP
--secure-port=6443 \
--advertise-address=192.168.147.101 \     #change to master02's IP
......
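To confirm the edit took effect, the two changed addresses can simply be grepped out of the file (an optional check, assuming the file path shown above):
grep -E 'bind-address|advertise-address' /opt/kubernetes/cfg/kube-apiserver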

//On the master02 node, start each service and enable it at boot
systemctl start kube-apiserver.service
systemctl enable kube-apiserver.service
systemctl start kube-controller-manager.service
systemctl enable kube-controller-manager.service
systemctl start kube-scheduler.service
systemctl enable kube-scheduler.service
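An optional way to confirm that all three components came up is to check their unit state and the apiserver's listening port:
systemctl status kube-apiserver kube-controller-manager kube-scheduler | grep Active
netstat -natp | grep 6443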

//Check the status of the node nodes
ln -s /opt/kubernetes/bin/* /usr/local/bin/
kubectl get nodes
kubectl get nodes -o wide     #-o wide: show extra information; for Pods, this includes the name of the Node the Pod runs on
//At this point, the node status seen from master02 is only information read from etcd; the node nodes have not actually established communication with master02 yet, so a VIP is needed to associate the node nodes with both master nodes
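On master02 the health of the control-plane components can also be checked; kubectl get cs (componentstatuses) is deprecated in newer Kubernetes releases but still works on the versions typically used in this kind of binary deployment, so treat it as an optional check:
kubectl get cs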


10. Load Balancer Deployment


Configure an active/standby load balancer cluster (nginx provides the load balancing, keepalived provides the dual-machine hot standby)


Perform the following on the lb01 and lb02 nodes


//Configure the official nginx online yum repository
cat > /etc/yum.repos.d/nginx.repo << 'EOF'
[nginx]
name=nginx repo
baseurl=http://nginx.org/packages/centos/7/$basearch/
gpgcheck=0
EOF

yum install nginx -y
//Modify the nginx configuration file to configure layer-4 reverse-proxy load balancing, pointing at the IPs of the two k8s master nodes on port 6443
vim /etc/nginx/nginx.conf
events {
    worker_connections  1024;
}
#add the following block
stream {
    log_format  main  '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
    access_log  /var/log/nginx/k8s-access.log  main;

    upstream k8s-apiserver {
        server 192.168.147.100:6443;
        server 192.168.147.101:6443;
    }

    server {
        listen 6443;
        proxy_pass k8s-apiserver;
    }
}
http {
......
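The stream {} block requires nginx to be built with the stream module. The package from the official nginx repository normally includes it, but this can be verified before reloading (an optional check):
nginx -V 2>&1 | grep -o with-stream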



//Check the configuration file syntax
nginx -t   


//Start the nginx service and verify that it is listening on port 6443
systemctl start nginx
systemctl enable nginx
netstat -natp | grep nginx 


//Deploy the keepalived service
yum install keepalived -y
//Modify the keepalived configuration file
vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
   #recipient email addresses
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   #sender email address
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id NGINX_MASTER  #NGINX_MASTER on the lb01 node, NGINX_BACKUP on the lb02 node
}
#add a periodically executed check script
vrrp_script check_nginx {
    script "/etc/nginx/check_nginx.sh"  #path of the script that checks whether nginx is alive
}
vrrp_instance VI_1 {
    state MASTER          #MASTER on the lb01 node, BACKUP on the lb02 node
    interface ens33       #network interface name, ens33
    virtual_router_id 51  #VRID; must be the same on both nodes
    priority 100          #100 on the lb01 node, 90 on the lb02 node
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.147.200/24  #the VIP
    }
    track_script {
        check_nginx     #reference the script defined in vrrp_script
    }
}
//Create the nginx health-check script
vim /etc/nginx/check_nginx.sh
#!/bin/bash
#egrep -cv "grep|$$" filters out lines containing grep, or $$ (the PID of the current shell, i.e. the PID of this running script)
count=$(ps -ef | grep nginx | egrep -cv "grep|$$")
if [ "$count" -eq 0 ];then
    systemctl stop keepalived
fi
chmod +x /etc/nginx/check_nginx.sh
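If lb01 is configured first, the same two files can be pushed to lb02 and adjusted there (router_id, state and priority differ, as noted in the comments above). The hostname lb02 below is an assumption; replace it with the real address of the lb02 node and make sure the script stays executable there:
scp /etc/keepalived/keepalived.conf root@lb02:/etc/keepalived/
scp /etc/nginx/check_nginx.sh root@lb02:/etc/nginx/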


//Start the keepalived service (nginx must be started before keepalived)
systemctl start keepalived
systemctl enable keepalived
ip a        #check whether the VIP has been created
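An optional failover test: stopping nginx on lb01 should make the check script stop keepalived, and the VIP should move to lb02; restarting nginx and keepalived on lb01 lets it take the VIP back thanks to its higher priority:
systemctl stop nginx        #on lb01
ip a                        #the VIP should disappear from lb01 and appear on lb02
systemctl start nginx       #on lb01
systemctl start keepalived  #the check script stopped keepalived, so start it again
ip a                        #the VIP should return to lb01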

//On the node nodes, change the server address in the bootstrap.kubeconfig, kubelet.kubeconfig and kube-proxy.kubeconfig files to the VIP
cd /opt/kubernetes/cfg/
vim bootstrap.kubeconfig 
server: https://192.168.147.200:6443
vim kubelet.kubeconfig
server: https://192.168.147.200:6443
vim kube-proxy.kubeconfig
server: https://192.168.147.200:6443
//Restart the kubelet and kube-proxy services
systemctl restart kubelet.service 
systemctl restart kube-proxy.service
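A quick check that all three kubeconfig files now point at the VIP (assuming the paths shown above):
grep 'server:' /opt/kubernetes/cfg/*.kubeconfig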
//On lb01, check the connections between nginx and the node / master nodes
netstat -natp | grep nginx
tcp        0      0 0.0.0.0:6443            0.0.0.0:*               LISTEN      15257/nginx: master 
tcp        0      0 0.0.0.0:80              0.0.0.0:*               LISTEN      15257/nginx: master 
tcp        0      0 192.168.147.106:57350   192.168.147.100:6443    ESTABLISHED 15259/nginx: worker 
tcp        0      0 192.168.147.106:47768   192.168.147.101:6443    ESTABLISHED 15258/nginx: worker 
tcp        0      0 192.168.147.106:47784   192.168.147.101:6443    ESTABLISHED 15259/nginx: worker 
tcp        0      0 192.168.147.106:47796   192.168.147.101:6443    ESTABLISHED 15258/nginx: worker 
tcp        0      0 192.168.147.200:6443    192.168.147.105:54462   ESTABLISHED 15259/nginx: worker 
tcp        0      0 192.168.147.106:57338   192.168.147.100:6443    ESTABLISHED 15258/nginx: worker 
tcp        0      0 192.168.147.200:6443    192.168.147.105:54456   ESTABLISHED 15258/nginx: worker 
tcp        0      0 192.168.147.106:47776   192.168.147.101:6443    ESTABLISHED 15258/nginx: worker 
tcp        0      0 192.168.147.106:47792   192.168.147.101:6443    ESTABLISHED 15259/nginx: worker 
tcp        0      0 192.168.147.200:6443    192.168.147.102:49827   ESTABLISHED 15259/nginx: worker 
tcp        0      0 192.168.147.200:6443    192.168.147.102:49868   ESTABLISHED 15259/nginx: worker 
tcp        0      0 192.168.147.200:6443    192.168.147.105:54470   ESTABLISHED 15258/nginx: worker 
tcp        0      0 192.168.147.200:6443    192.168.147.102:49872   ESTABLISHED 15258/nginx: worker 
tcp        0      0 192.168.147.106:47770   192.168.147.101:6443    ESTABLISHED 15259/nginx: worker 
tcp        0      0 192.168.147.106:57332   192.168.147.100:6443    ESTABLISHED 15259/nginx: worker 
tcp        0      0 192.168.147.200:6443    192.168.147.102:49866   ESTABLISHED 15259/nginx: worker 
tcp        0      0 192.168.147.106:47780   192.168.147.101:6443    ESTABLISHED 15258/nginx: worker 
tcp        0      0 192.168.147.200:6443    192.168.147.102:49874   ESTABLISHED 15258/nginx: worker 
tcp        0      0 192.168.147.106:57348   192.168.147.100:6443    ESTABLISHED 15258/nginx: worker 
tcp        0      0 192.168.147.200:6443    192.168.147.102:49884   ESTABLISHED 15258/nginx: worker 
tcp        0      0 192.168.147.106:57354   192.168.147.100:6443    ESTABLISHED 15259/nginx: worker 
tcp        0      0 192.168.147.200:6443    192.168.147.105:54466   ESTABLISHED 15259/nginx: worker 
tcp        0      0 192.168.147.200:6443    192.168.147.105:54468   ESTABLISHED 15259/nginx: worker 
tcp        0      0 192.168.147.106:47786   192.168.147.101:6443    ESTABLISHED 15258/nginx: worker 
tcp        0      0 192.168.147.200:6443    192.168.147.105:54458   ESTABLISHED 15258/nginx: worker 
tcp        0      0 192.168.147.106:57324   192.168.147.100:6443    ESTABLISHED 15259/nginx: worker 
tcp        0      0 192.168.147.200:6443    192.168.147.105:54438   ESTABLISHED 15259/nginx: worker 
tcp        0      0 192.168.147.200:6443    192.168.147.102:49864   ESTABLISHED 15258/nginx: worker 


Perform the following on the master01 node
//Test creating a pod
kubectl run nginx --image=nginx
//Check the Pod status
kubectl get pods
NAME                    READY   STATUS              RESTARTS   AGE
nginx-dbddb74b8-nf9sk   0/1     ContainerCreating   0          33s   #still being created
kubectl get pods
NAME                    READY   STATUS    RESTARTS   AGE
nginx-dbddb74b8-nf9sk   1/1     Running   0          80s        #created and running
kubectl get pods -o wide
NAME    READY   STATUS    RESTARTS   AGE    IP            NODE              NOMINATED NODE   READINESS GATES
nginx   1/1     Running   0          3m4s   10.244.1.15   192.168.147.105   <none>           <none>
//READY 1/1 means this Pod contains 1 container and it is ready

//On a node node in the corresponding subnet, the Pod can be accessed directly with a browser or with curl
curl 10.244.1.15
//Then view the nginx access log from the master01 node
kubectl logs nginx-dbddb74b8-nf9sk



11. Deploying the Dashboard


Dashboard Introduction

The Dashboard is a web-based Kubernetes user interface. You can use it to deploy containerized applications to a Kubernetes cluster, troubleshoot those applications, and manage the cluster itself together with its accompanying resources. The Dashboard gives you an overview of the applications running on the cluster and lets you create or modify individual Kubernetes resources (such as Deployments, Jobs, DaemonSets, and so on). For example, you can scale a Deployment, initiate a rolling update, restart a Pod, or deploy a new application using a deployment wizard. The Dashboard also provides information on the state of the Kubernetes resources in the cluster and on any errors that may have occurred.

//Perform the following on the node nodes
#Upload dashboard.tar and metrics-scraper.tar to /opt
scp dashboard.tar metrics-scraper.tar node02:/opt/
docker load -i dashboard.tar
docker load -i metrics-scraper.tar
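The loaded images can be verified on each node; the exact repository names and tags depend on the tar files, so the grep pattern below is only an assumption:
docker images | grep -E 'dashboard|metrics-scraper'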
//Perform the following on the master01 node
#Upload the recommended.yaml file to the /opt/k8s directory
cd /opt/k8s
vim recommended.yaml
#By default the Dashboard is only reachable from inside the cluster; change the Service to NodePort type to expose it externally:
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30001     #add
  type: NodePort          #add
  selector:
    k8s-app: kubernetes-dashboard
kubectl apply -f recommended.yaml
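After applying the manifest, the Dashboard Pods and the NodePort Service can be checked (the kubernetes-dashboard namespace comes from recommended.yaml):
kubectl get pods,svc -n kubernetes-dashboard -o wide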


#Create a service account and bind it to the default cluster-admin cluster role
kubectl create serviceaccount dashboard-admin -n kube-system
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')
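If only the token string itself is needed, it can be extracted from the describe output with a one-liner (a convenience variant of the command above, not part of the original steps):
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}') | awk '/token:/{print $2}'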


#Use the token from the output to log in to the Dashboard at the address below
https://192.168.147.102:30001

