Kubernetes Spring Cloud Microservice Architecture (10): Installing Kubernetes 1.15 for Spring Cloud Microservices

Summary: (10) Installing Kubernetes 1.15 for the Spring Cloud microservice series

4.1 Initial node preparation: disable the firewall and SELinux

#All nodes
[root@master-1 ~]# systemctl stop firewalld
[root@master-1 ~]# systemctl disable firewalld
[root@master-1 ~]# sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
[root@master-1 ~]# reboot

4.3 Set the timezone


#All nodes
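A minimal example, assuming the cluster should run in the Asia/Shanghai timezone:
[root@master-1 ~]# timedatectl set-timezone Asia/Shanghai
[root@master-1 ~]# timedatectl status | grep "Time zone"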

4.4 Disable swap

#All nodes
[root@master-1 ~]# swapoff -a
[root@master-1 ~]# sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
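#Verify that swap is fully disabled: swapon prints nothing and free reports 0 swap
[root@master-1 ~]# swapon --show
[root@master-1 ~]# free -m | grep -i swap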

4.5 Set up system time synchronization

#All nodes
[root@master-1 ~]# yum install -y ntpdate
[root@master-1 ~]# ntpdate -u ntp.api.bz
[root@master-1 ~]# echo "*/5 * * * * ntpdate time7.aliyun.com >/dev/null 2>&1" >> /etc/crontab
[root@master-1 ~]# service crond restart
[root@master-1 ~]# chkconfig crond on

4.6 Set the hostnames

#All nodes

[root@master-1 ~]# cat > /etc/hosts <<EOF
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.91.18  master-1
192.168.91.19  master-2
192.168.91.20  master-3
192.168.91.21  node-1
192.168.91.22  node-2
EOF
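The /etc/hosts entries above only provide name resolution; each machine's own hostname still has to be set, for example:
[root@master-1 ~]# hostnamectl set-hostname master-1
#and likewise master-2, master-3, node-1, node-2 on the other hosts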

4.7 Set up passwordless SSH login

#Distribute the key from any Master node to all other nodes (the other Masters and the Nodes)
#In this example it is distributed from master-1
[root@master-1 ~]# yum install -y expect
[root@master-1 ~]# ssh-keygen -t rsa -P "" -f /root/.ssh/id_rsa
#Replace with the actual root password
[root@master-1 ~]# export mypass=123456s
[root@master-1 ~]# name=(master-1 master-2 master-3 node-1 node-2)
[root@master-1 ~]# for i in ${name[@]};do
expect -c "
spawn ssh-copy-id -i /root/.ssh/id_rsa.pub root@$i
  expect {
    \"*yes/no*\" {send \"yes\r\"; exp_continue}
    \"*password*\" {send \"$mypass\r\"; exp_continue}
    \"*Password*\" {send \"$mypass\r\";}
  }"
done
#Test the connection
[root@master-1 ~]# ssh master-2

4.8 Tune kernel parameters

#All nodes
[root@master-1 ~]# cat >/etc/sysctl.d/kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
vm.swappiness=0
fs.file-max=52706963
fs.nr_open=52706963
EOF
#Apply the kernel settings
[root@master-1 ~]# sysctl -p /etc/sysctl.d/kubernetes.conf
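The two net.bridge.* keys only take effect once the br_netfilter module is loaded; if sysctl complains about unknown keys, load the module first:
[root@master-1 ~]# modprobe br_netfilter
[root@master-1 ~]# echo "br_netfilter" > /etc/modules-load.d/br_netfilter.conf
[root@master-1 ~]# sysctl -p /etc/sysctl.d/kubernetes.conf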

4.9 Install Keepalived on the HA (master) nodes

[root@master-1 ~]# yum install -y keepalived
#Adjust the network interface name, and lower the priority on the BACKUP node
[root@master-1 ~]# cat >/etc/keepalived/keepalived.conf <<EOL
global_defs {
   router_id KUB_LVS
}
vrrp_script CheckMaster {
    script "curl -k https://192.168.91.254:6443"
    interval 3
    timeout 9
    fall 2
    rise 2
}
vrrp_instance VI_1 {
    state MASTER
    interface ens32
    virtual_router_id 61
    priority 100
    advert_int 1
    nopreempt
    authentication {
        auth_type PASS
        auth_pass 111111
    }
    virtual_ipaddress {
        192.168.91.254/24 dev ens32
    }
    track_script {
        CheckMaster
    }
}
EOL
#BACKUP (slave) nodes
#Change state to BACKUP and priority to 90
#Start keepalived
[root@master-1 ~]# systemctl enable keepalived && systemctl restart keepalived
[root@master-1 ~]# service keepalived status
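On the active MASTER node the VIP should now be bound to the configured interface:
[root@master-1 ~]# ip addr show ens32 | grep 192.168.91.254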
#On 192.168.91.11 (the harbor host), install Nginx as a TCP load balancer for the kube-apiservers
#Add the nginx yum repository
[root@harbor ~]# vim /etc/yum.repos.d/nginx.repo 
[nginx]
name=nginx repo
baseurl=http://nginx.org/packages/centos/7/x86_64/
gpgcheck=0
enabled=1
#Install
[root@harbor ~]# yum install nginx-1.12.2 -y
#Remove the default site config
[root@harbor harbor]# rm /etc/nginx/conf.d/default.conf -rf
#Edit the config file
[root@harbor harbor]# vim /etc/nginx/nginx.conf
#Append at the very end of the file, outside the http {} block
stream {
    log_format  main  '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
    access_log  /var/log/nginx/access.log  main;
    upstream apiserver {
        server 192.168.91.18:6443;
        server 192.168.91.19:6443;
        server 192.168.91.20:6443;
    }
    server {
        listen 192.168.91.254:6443;
        proxy_connect_timeout 1s;
        proxy_timeout 2s;
        proxy_pass apiserver;
    }
}
#Start the service
[root@harbor harbor]# chkconfig nginx on
[root@harbor harbor]# service nginx start
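A quick sanity check that nginx accepted the stream block and is listening on the apiserver port:
[root@harbor harbor]# nginx -t
[root@harbor harbor]# ss -lntp | grep 6443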

5 Configure certificates

5.1 Download the self-signed certificate tools (cfssl)

#Run on the distribution host, Master-1
[root@master-1 ~]# mkdir /soft && cd /soft
[root@master-1 ~]# wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
[root@master-1 ~]# wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
[root@master-1 ~]# wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
[root@master-1 ~]# chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
[root@master-1 ~]# mv cfssl_linux-amd64 /usr/local/bin/cfssl
[root@master-1 ~]# mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
[root@master-1 ~]# mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo

5.2 Generate the ETCD certificates

#Create the working directory (Master-1)
[root@master-1 ~]# mkdir /root/etcd && cd /root/etcd
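
5.2.1 Create the CA config file (Master-1)

The ca-config.json listed in 5.2.4 and the -profile=www flag used there are never shown in this walkthrough; a typical cfssl config matching that profile name (values assumed, mirroring 5.3.1) is:

[root@master-1 ~]# cat << EOF | tee ca-config.json
{
    "signing": {
        "default": {
            "expiry": "87600h"
        },
        "profiles": {
            "www": {
                "expiry": "87600h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            }
        }
    }
}
EOF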

5.2.2 Create the CA certificate signing request file (Master-1)

[root@master-1 ~]# cat << EOF | tee ca-csr.json
{
    "CN": "etcd CA",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing"
        }
    ]
}
EOF

5.2.3 Create the ETCD certificate signing request file

#All master IPs can be added to the csr file (Master-1)
[root@master-1 ~]# cat << EOF | tee server-csr.json
{
    "CN": "etcd",
    "hosts": [
    "master-1",
    "master-2",
    "master-3",
    "192.168.91.18",
    "192.168.91.19",
    "192.168.91.20"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing"
        }
    ]
}
EOF

5.2.4 Generate the ETCD CA certificate and the ETCD key pair (Master-1)

[root@master-1 ~]# cd /root/etcd/
#Generate the CA certificate (Master-1)
[root@master-1 ~]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
[root@master-1 etcd]# ll
total 24
-rw-r--r-- 1 root root  287 Apr  5 11:23 ca-config.json      #CA config file
-rw-r--r-- 1 root root  956 Apr  5 11:26 ca.csr       #CA certificate signing request
-rw-r--r-- 1 root root  209 Apr  5 11:23 ca-csr.json      #CA CSR definition
-rw------- 1 root root 1679 Apr  5 11:26 ca-key.pem     #CA private key
-rw-r--r-- 1 root root 1265 Apr  5 11:26 ca.pem       #CA certificate
-rw-r--r-- 1 root root  338 Apr  5 11:26 server-csr.json
#Generate the etcd server certificate (Master-1)
[root@master-1 etcd]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
[root@master-1 etcd]# ll
total 36
-rw-r--r-- 1 root root  287 Apr  5 11:23 ca-config.json
-rw-r--r-- 1 root root  956 Apr  5 11:26 ca.csr
-rw-r--r-- 1 root root  209 Apr  5 11:23 ca-csr.json
-rw------- 1 root root 1679 Apr  5 11:26 ca-key.pem
-rw-r--r-- 1 root root 1265 Apr  5 11:26 ca.pem
-rw-r--r-- 1 root root 1054 Apr  5 11:31 server.csr
-rw-r--r-- 1 root root  338 Apr  5 11:26 server-csr.json
-rw------- 1 root root 1675 Apr  5 11:31 server-key.pem #used by the etcd client
-rw-r--r-- 1 root root 1379 Apr  5 11:31 server.pem

5.3 Create the Kubernetes certificates

#These certificates secure communication between the Kubernetes components; they are separate from the ETCD certificates above. (Master-1)
[root@master-1 ~]# mkdir /root/kubernetes/ && cd /root/kubernetes/

5.3.1 Create the CA config file (Master-1)

[root@master-1 ~]# cat << EOF | tee ca-config.json
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

5.3.2 Create the CA certificate signing request file (Master-1)

[root@master-1 ~]#  cat << EOF | tee ca-csr.json
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

5.3.3 Create the API SERVER certificate signing request file (Master-1)

#Remember to adjust the VIP address
[root@master-1 ~]#  cat << EOF | tee server-csr.json
{
    "CN": "kubernetes",
    "hosts": [
      "10.0.0.1",
      "127.0.0.1",
"10.0.0.2",
"192.168.91.18",
"192.168.91.19",
"192.168.91.20",
"192.168.91.21",
"192.168.91.22",
"192.168.91.254",
"master-1",
"master-2",
"master-3",
"node-1",
"node-2",
      "kubernetes",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

5.3.4 Create the Kubernetes Proxy certificate signing request file (Master-1)

[root@master-1 ~]#  cat << EOF | tee kube-proxy-csr.json
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

5.3.5 Generate the Kubernetes CA certificate and key pairs

# Generate the CA certificate (Master-1)
[root@master-1 ~]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
# Generate the api-server certificate (Master-1)
[root@master-1 ~]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
# Generate the kube-proxy certificate (Master-1)
[root@master-1 ~]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
-profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

6 Deploy ETCD

#Download the etcd binaries (all master nodes)
[root@master-1 ~]# mkdir -p /soft && cd /soft
[root@master-1 ~]# wget https://github.com/etcd-io/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz
[root@master-1 ~]# tar -xvf etcd-v3.3.10-linux-amd64.tar.gz
[root@master-1 ~]# cd etcd-v3.3.10-linux-amd64/
[root@master-1 ~]# cp etcd etcdctl /usr/local/bin/

6.1 Edit the etcd config file (all master nodes)

#Change ETCD_NAME for each node
#Change the listen addresses for each node
[root@master-1 ~]# mkdir -p /etc/etcd/{cfg,ssl}
[root@master-1 ~]# cat  >/etc/etcd/cfg/etcd.conf<<EOFL
#[Member]
ETCD_NAME="master-1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.91.18:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.91.18:2379,http://192.168.91.18:2390"
#[Clustering] (the unit file in 6.2 also expects the variables below; values assumed, adjust the name and IPs per node)
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.91.18:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.91.18:2380"
ETCD_INITIAL_CLUSTER="master-1=https://192.168.91.18:2380,master-2=https://192.168.91.19:2380,master-3=https://192.168.91.20:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
EOFL

6.2 Create the etcd systemd service (all master nodes)

[root@master-1 ~]#  cat > /usr/lib/systemd/system/etcd.service<<EOFL
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/etc/etcd/cfg/etcd.conf
ExecStart=/usr/local/bin/etcd \
--name=\${ETCD_NAME} \
--data-dir=\${ETCD_DATA_DIR} \
--listen-peer-urls=\${ETCD_LISTEN_PEER_URLS} \
--listen-client-urls=\${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
--advertise-client-urls=\${ETCD_ADVERTISE_CLIENT_URLS} \
--initial-advertise-peer-urls=\${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
--initial-cluster=\${ETCD_INITIAL_CLUSTER} \
--initial-cluster-token=\${ETCD_INITIAL_CLUSTER_TOKEN} \
--initial-cluster-state=new \
--cert-file=/etc/etcd/ssl/server.pem \
--key-file=/etc/etcd/ssl/server-key.pem \
--peer-cert-file=/etc/etcd/ssl/server.pem \
--peer-key-file=/etc/etcd/ssl/server-key.pem \
--trusted-ca-file=/etc/etcd/ssl/ca.pem \
--peer-trusted-ca-file=/etc/etcd/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOFL

6.3 Copy the etcd certificates into place

[root@master-1 ~]# mkdir -p /etc/etcd/ssl/
[root@master-1 ~]# \cp /root/etcd/*pem /etc/etcd/ssl/ -rf
#Copy the etcd certificates to every node
[root@master-1 ~]# for i in master-2 master-3 node-1 node-2;do ssh $i mkdir -p /etc/etcd/{cfg,ssl};done
[root@master-1 ~]# for i in master-2 master-3 node-1 node-2;do scp /etc/etcd/ssl/* $i:/etc/etcd/ssl/;done
[root@master-1 ~]# for i in master-2 master-3 node-1 node-2;do echo $i "------>"; ssh $i ls /etc/etcd/ssl;done

6.4 Start etcd (all master nodes)

[root@master-1 ~]# chkconfig etcd on
[root@master-1 ~]# service etcd start
[root@master-1 ~]# service etcd status

6.5 Check that the etcd cluster is healthy

[root@master-1 ~]#  etcdctl --ca-file=/etc/etcd/ssl/ca.pem --cert-file=/etc/etcd/ssl/server.pem \
--key-file=/etc/etcd/ssl/server-key.pem --endpoints="https://192.168.91.18:2379"  cluster-health
member bcef4c3b581e1d2e is healthy: got healthy result from https://192.168.91.18:2379
member d99a26304cec5ace is healthy: got healthy result from https://192.168.91.19:2379
member fc4e801f28271758 is healthy: got healthy result from https://192.168.91.20:2379
cluster is healthy

6.6 Create the POD network range used by Docker/Flannel (any master node)

[root@master-2 ~]# etcdctl --ca-file=/etc/etcd/ssl/ca.pem \
--cert-file=/etc/etcd/ssl/server.pem --key-file=/etc/etcd/ssl/server-key.pem \
--endpoints="https://192.168.91.18:2379,https://192.168.91.19:2379,https://192.168.91.20:2379" \
 set /coreos.com/network/config  \
 '{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}'
#Check that the network config was written
[root@master-2 etcd-v3.3.10-linux-amd64]# etcdctl \
--endpoints=https://192.168.91.18:2379,https://192.168.91.19:2379,https://192.168.91.20:2379 \
--ca-file=/etc/etcd/ssl/ca.pem \
--cert-file=/etc/etcd/ssl/server.pem \
--key-file=/etc/etcd/ssl/server-key.pem \
get /coreos.com/network/config
{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}

7 Install Docker

#Install on all Node nodes
#Install the CE edition
[root@node-1 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
[root@node-1 ~]# yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
[root@node-1 ~]# yum install -y docker-ce-19.03.6 docker-ce-cli-19.03.6 containerd.io

7.1 Start the Docker service

[root@node-1 ~]# chkconfig docker on
[root@node-1 ~]# service docker start
[root@node-1 ~]# service docker status

7.2 Configure a registry mirror (all node nodes)

[root@node-1 ~]# mkdir -p /etc/docker
[root@node-1 ~]# tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://plqjafsr.mirror.aliyuncs.com"]
}
EOF
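Docker only reads daemon.json at startup, so restart it for the mirror to take effect; the grep simply confirms it was picked up:
[root@node-1 ~]# systemctl restart docker
[root@node-1 ~]# docker info | grep -A1 "Registry Mirrors"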

8 Deploy Flannel

8.1 Download the Flannel binaries

#Needed on all nodes; download once on master-1 and distribute
[root@master-1 ~]# mkdir -p /soft ; cd /soft
[root@master-1 ~]# wget https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz
[root@master-1 ~]# tar xvf flannel-v0.11.0-linux-amd64.tar.gz
[root@master-1 ~]# mv flanneld mk-docker-opts.sh /usr/local/bin/
#Copy flanneld to all the other nodes
[root@master-1 ~]# for i in master-2 master-3 node-1 node-2;do scp /usr/local/bin/flanneld $i:/usr/local/bin/;done
[root@master-1 ~]# for i in master-2 master-3 node-1 node-2;do scp /usr/local/bin/mk-docker-opts.sh $i:/usr/local/bin/;done

8.2 Configure Flannel (all nodes)

[root@node-1 ~]# mkdir -p /etc/flannel
[root@node-1 ~]# cat > /etc/flannel/flannel.cfg<<EOF
FLANNEL_OPTIONS="-etcd-endpoints=https://192.168.91.18:2379,https://192.168.91.19:2379,https://192.168.91.20:2379 -etcd-cafile=/etc/etcd/ssl/ca.pem -etcd-certfile=/etc/etcd/ssl/server.pem  -etcd-keyfile=/etc/etcd/ssl/server-key.pem"
EOF
#For multiple ETCD endpoints: -etcd-endpoints=https://192.168.91.200:2379,https://192.168.91.201:2379,https://192.168.91.202:2379

8.3 Create the Flannel systemd unit (all nodes)

[root@node-1 ~]# cat > /usr/lib/systemd/system/flanneld.service <<EOF
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service
[Service]
Type=notify
EnvironmentFile=/etc/flannel/flannel.cfg
ExecStart=/usr/local/bin/flanneld --ip-masq \$FLANNEL_OPTIONS
ExecStartPost=/usr/local/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF

8.4 Start Flannel

[root@node-1 ~]# service flanneld start
[root@node-1 ~]# chkconfig flanneld on
[root@node-2 ~]# service flanneld status
Redirecting to /bin/systemctl status flanneld.service
● flanneld.service - Flanneld overlay address etcd agent
   Loaded: loaded (/usr/lib/systemd/system/flanneld.service; disabled; vendor preset: disabled)
   Active: active (running) since Sun 2020-04-05 14:35:51 CST; 7min ago
  Process: 11420 ExecStartPost=/usr/local/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env (code=exited, status=0/SUCCESS)
 Main PID: 11406 (flanneld)
    Tasks: 8
   Memory: 6.6M
   CGroup: /system.slice/flanneld.service
           └─11406 /usr/local/bin/flanneld --ip-masq -etcd-endpoints=https://192.168.91.18:2379,https://192.168.91.19:2379,https://192.168.91.20:2379 -etcd-cafile=/etc/etcd/ssl/ca.pem...
#Every node should now have an address in the 172.17.0.0/16 range
[root@master-1 soft]# ip a | grep flannel
3: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default 
    inet 172.17.41.0/32 scope global flannel.1
#Stop flanneld on the node nodes (it is restarted after Docker is reconfigured below)
[root@node-1 ~]# service flanneld stop

8.5 Modify the Docker unit file (node nodes)

[root@node-1 ~]# cat >/usr/lib/systemd/system/docker.service<<EOFL
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd  \$DOCKER_NETWORK_OPTIONS
ExecReload=/bin/kill -s HUP \$MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
EOFL

8.6 Restart the Docker service

[root@node-1 ~]# systemctl daemon-reload
[root@node-1 ~]# service flanneld restart
[root@node-1 ~]# service docker restart
#Check the addresses: docker0 must be in the same subnet as flannel.1
[root@node-1 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:7b:24:0a brd ff:ff:ff:ff:ff:ff
    inet 192.168.91.21/24 brd 192.168.91.255 scope global noprefixroute ens32
       valid_lft forever preferred_lft forever
    inet6 fe80::f8e9:2eba:8648:f6ad/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default 
    link/ether 02:42:1b:93:48:98 brd ff:ff:ff:ff:ff:ff
    inet 172.17.68.1/24 brd 172.17.68.255 scope global docker0
       valid_lft forever preferred_lft forever
4: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default 
    link/ether 8a:11:c3:08:83:48 brd ff:ff:ff:ff:ff:ff
    inet 172.17.68.0/32 scope global flannel.1
       valid_lft forever preferred_lft forever
    inet6 fe80::8811:c3ff:fe08:8348/64 scope link 
       valid_lft forever preferred_lft forever

8.7 Verify that each Node can reach the other nodes' docker0

#Ping the other nodes from every Node; all the pod subnets should be reachable.
[root@master-1 soft]# ping  172.17.68.1
PING 172.17.68.1 (172.17.68.1) 56(84) bytes of data.
64 bytes from 172.17.68.1: icmp_seq=1 ttl=64 time=0.345 ms
64 bytes from 172.17.68.1: icmp_seq=2 ttl=64 time=0.325 ms
64 bytes from 172.17.68.1: icmp_seq=3 ttl=64 time=0.518 ms

9 Install the Master components

#The following components are required on the Master side:
kube-apiserver
kube-scheduler
kube-controller-manager

9.1 Install the API Server service

9.1.1 Download the Kubernetes binaries (1.15.1) (master-1)

[root@master-1 soft]# cd /soft
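#The v1.15.1 server tarball is assumed to already be in /soft; if it is not, it can be fetched first (download URL assumed):
[root@master-1 soft]# wget https://storage.googleapis.com/kubernetes-release/release/v1.15.1/kubernetes-server-linux-amd64.tar.gz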
[root@master-1 soft]# tar xvf kubernetes-server-linux-amd64.tar.gz 
[root@master-1 soft]# cd kubernetes/server/bin/
[root@master-1 soft]# cp kube-scheduler kube-apiserver kube-controller-manager kubectl /usr/local/bin/
#Copy the binaries to the other master nodes
[root@master-1 bin]# for i in master-2 master-3;do scp /usr/local/bin/kube* $i:/usr/local/bin/;done

9.1.2 Configure the Kubernetes certificates

#The Kubernetes components need these certificates to talk to each other; copy them to every master node (master-1)
[root@master-1  soft]#mkdir -p /etc/kubernetes/{cfg,ssl}
[root@master-1  soft]#cp /root/kubernetes/*.pem /etc/kubernetes/ssl/
#Copy to the other nodes
[root@master-1  soft]# for i in master-2 master-3 node-1 node-2;do ssh $i mkdir -p /etc/kubernetes/{cfg,ssl};done
[root@master-1  soft]# for i in master-2 master-3 node-1 node-2;do scp /etc/kubernetes/ssl/* $i:/etc/kubernetes/ssl/;done
[root@master-1 bin]# for i in master-2 master-3 node-1 node-2;do echo $i "---------->"; ssh $i ls /etc/kubernetes/ssl;done

9.1.3 Create the TLS Bootstrapping Token

#TLS bootstrapping lets the kubelet first connect to the apiserver as a predefined low-privilege user
#and then request its own certificate, which the apiserver signs dynamically.
#The token can be any string containing 128 bits of entropy; generate it with a secure random source:
[root@master-1  soft]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
f89a76f197526a0d4bc2bf9c86e871c3

9.1.4 Edit the token file (master-1)

#f89a76f197526a0d4bc2bf9c86e871c3: the random string generated above; kubelet-bootstrap: user name; 10001: UID; system:kubelet-bootstrap: group
[root@master-1  soft]# vim /etc/kubernetes/cfg/token.csv
f89a76f197526a0d4bc2bf9c86e871c3,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
#Copy to the other master nodes
[root@master-1 bin]# for i in master-2 master-3;do scp /etc/kubernetes/cfg/token.csv $i:/etc/kubernetes/cfg/token.csv;done

9.1.5 Create the apiserver config file (all master nodes)

#The file is almost identical on every node; only the IP addresses need to change
[root@master-1  soft]# cat >/etc/kubernetes/cfg/kube-apiserver.cfg <<EOFL
KUBE_APISERVER_OPTS="--logtostderr=true \
--v=4 \
--insecure-bind-address=0.0.0.0 \
--insecure-port=8080 \
--etcd-servers=https://192.168.91.18:2379,https://192.168.91.19:2379,https://192.168.91.20:2379 \
--bind-address=0.0.0.0 \
--secure-port=6443 \
--advertise-address=0.0.0.0 \
--allow-privileged=true \
--service-cluster-ip-range=10.0.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth \
--token-auth-file=/etc/kubernetes/cfg/token.csv \
--service-node-port-range=30000-50000 \
--tls-cert-file=/etc/kubernetes/ssl/server.pem  \
--tls-private-key-file=/etc/kubernetes/ssl/server-key.pem \
--client-ca-file=/etc/kubernetes/ssl/ca.pem \
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/etc/etcd/ssl/ca.pem \
--etcd-certfile=/etc/etcd/ssl/server.pem \
--etcd-keyfile=/etc/etcd/ssl/server-key.pem"
EOFL
#Parameter notes
--logtostderr                     log to stderr
--v                               log verbosity level
--etcd-servers                    etcd cluster endpoints, e.g.
                                  https://192.168.91.200:2379,https://192.168.91.201:2379,https://192.168.91.202:2379
--bind-address                    listen address
--secure-port                     https secure port
--advertise-address               address advertised to the rest of the cluster
--allow-privileged                allow privileged containers
--service-cluster-ip-range        Service virtual IP range
--enable-admission-plugins        admission control plugins
--authorization-mode              authorization mode; RBAC and Node enabled here
--enable-bootstrap-token-auth     enable TLS bootstrap tokens
--token-auth-file                 token file
--service-node-port-range         NodePort range assigned to Services

9.1.6 Create the kube-apiserver unit file (all master nodes)

[root@master-1  soft]# cat >/usr/lib/systemd/system/kube-apiserver.service<<EOFL
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-apiserver.cfg
ExecStart=/usr/local/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOFL

9.1.7 Start the kube-apiserver service

[root@master-1  soft]# service kube-apiserver start 
[root@master-1  soft]# chkconfig kube-apiserver on
[root@master-1  soft]# service kube-apiserver status
[root@master-2 ~]# service kube-apiserver status
Redirecting to /bin/systemctl status kube-apiserver.service
● kube-apiserver.service - Kubernetes API Server
   Loaded: loaded (/usr/lib/systemd/system/kube-apiserver.service; disabled; vendor preset: disabled)
   Active: active (running) since Sun 2020-04-05 15:12:09 CST; 523ms ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 13884 (kube-apiserver)
   CGroup: /system.slice/kube-apiserver.service
           └─13884 /usr/local/bin/kube-apiserver --logtostderr=true --v=4 --insecure-bind-address=0.0.0.0 --insecure-port=8080 --etcd-servers=https://192.168.91.18:2379,https://192.16...
Apr 05 15:12:10 master-2 kube-apiserver[13884]: I0405 15:12:09.939058   13884 flags.go:33] FLAG: --token-auth-file="/etc/kubernetes/cfg/token.csv"
Apr 05 15:12:10 master-2 kube-apiserver[13884]: I0405 15:12:09.939062   13884 flags.go:33] FLAG: --v="4"
Apr 05 15:12:10 master-2 kube-apiserver[13884]: I0405 15:12:09.939065   13884 flags.go:33] FLAG: --version="false"
Apr 05 15:12:10 master-2 kube-apiserver[13884]: I0405 15:12:09.939077   13884 flags.go:33] FLAG: --vmodule=""
Apr 05 15:12:10 master-2 kube-apiserver[13884]: I0405 15:12:09.939081   13884 flags.go:33] FLAG: --watch-cache="true"
Apr 05 15:12:10 master-2 kube-apiserver[13884]: I0405 15:12:09.939085   13884 flags.go:33] FLAG: --watch-cache-sizes="[]"
Apr 05 15:12:10 master-2 kube-apiserver[13884]: I0405 15:12:09.939103   13884 services.go:45] Setting service IP to "10.0.0.1" (read-write).
Apr 05 15:12:10 master-2 kube-apiserver[13884]: I0405 15:12:09.939120   13884 server.go:560] external host was not specified, using 192.168.91.19
Apr 05 15:12:10 master-2 kube-apiserver[13884]: I0405 15:12:09.939129   13884 server.go:603] Initializing cache sizes based on 0MB limit
Apr 05 15:12:10 master-2 kube-apiserver[13884]: I0405 15:12:09.952964   13884 server.go:147] Version: v1.15.1
#Check that the secure port is listening
[root@master-2 ~]# netstat -anltup | grep 6443
tcp        0      0 192.168.91.19:6443      0.0.0.0:*               LISTEN      14061/kube-apiserve 
tcp        0      0 192.168.91.19:6443      192.168.91.19:36760     ESTABLISHED 14061/kube-apiserve 
tcp        0      0 192.168.91.19:36760     192.168.91.19:6443      ESTABLISHED 14061/kube-apiserve
#Check the secure port through the VIP (from a node)
[root@node-1 ~]# telnet 192.168.91.254 6443
Trying 192.168.91.254...
Connected to 192.168.91.254.
Escape character is '^]'.

9.2 Deploy the kube-scheduler service

#Create the kube-scheduler systemd unit file (all master nodes)
[root@master-1  soft]# cat >/usr/lib/systemd/system/kube-scheduler.service<<EOFL
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-scheduler.cfg
ExecStart=/usr/local/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOFL
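
The unit file above sources /etc/kubernetes/cfg/kube-scheduler.cfg, which this walkthrough never shows; a minimal config in the same style as the other components (options assumed) is:
[root@master-1  soft]# cat >/etc/kubernetes/cfg/kube-scheduler.cfg<<EOFL
KUBE_SCHEDULER_OPTS="--logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect"
EOFL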

9.2.2 Start the kube-scheduler service (all master nodes)

[root@master-1  soft]# service kube-scheduler restart
[root@master-1  soft]# chkconfig kube-scheduler on

9.2.3 Check the Master component status (any master)

[root@master-1 bin]# kubectl get cs
NAME                 STATUS      MESSAGE               ERROR
controller-manager   Unhealthy   Get http://127.0.0.1:10252/healthz: dial tcp 127.0.0.1:10252: connect: connection refused   
scheduler            Healthy     ok                                                                                          
etcd-0               Healthy     {"health":"true"}

9.3 Deploy kube-controller-manager

9.3.1 Create the kube-controller-manager config file (all master nodes)

[root@master-1 bin]# cat >/etc/kubernetes/cfg/kube-controller-manager.cfg<<EOFL
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \
--v=4 \
--master=127.0.0.1:8080 \
--leader-elect=true \
--address=0.0.0.0 \
--service-cluster-ip-range=10.0.0.0/24 \
--cluster-name=kubernetes \
--cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem  \
--root-ca-file=/etc/kubernetes/ssl/ca.pem \
--service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem"
EOFL
#Parameter notes
--master=127.0.0.1:8080     address of the local apiserver (insecure port)
--leader-elect              leader election: one instance becomes leader, the others stay on standby
--service-cluster-ip-range  IP range used for Kubernetes Services

9.3.2 Create the kube-controller-manager unit file

[root@master-1 bin]#  cat  >/usr/lib/systemd/system/kube-controller-manager.service<<EOFL
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-controller-manager.cfg
ExecStart=/usr/local/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOFL

9.3.3 Start the kube-controller-manager service

[root@master-1 bin]#  chkconfig kube-controller-manager on
[root@master-1 bin]#  service kube-controller-manager start
[root@master-2 ~]# service kube-controller-manager status
Redirecting to /bin/systemctl status kube-controller-manager.service
● kube-controller-manager.service - Kubernetes Controller Manager
   Loaded: loaded (/usr/lib/systemd/system/kube-controller-manager.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2020-04-05 15:52:30 CST; 1s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 16979 (kube-controller)
   CGroup: /system.slice/kube-controller-manager.service
           └─16979 /usr/local/bin/kube-controller-manager --logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect=true --address=0.0.0.0

9.4 Check the Master component status

#All master components must be healthy before the Node components are deployed. (master node)
[root@master-1 bin]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-1               Healthy   {"health":"true"}   
etcd-0               Healthy   {"health":"true"}   
etcd-2               Healthy   {"health":"true"}

10 Deploy the Node components

10.1 Deploy the kubelet component

10.1.1 Copy the Kubernetes binaries from the Master to the Nodes

#Configure the Node nodes
[root@master-1 bin]#cd /soft
[root@master-1 bin]# for i in node-1 node-2;do scp kubernetes/server/bin/kubelet kubernetes/server/bin/kube-proxy $i:/usr/local/bin/;done

10.1.2 Create the kubelet bootstrap.kubeconfig file

#On the Master-1 node
[root@master-1 bin]# mkdir /root/config ; cd /root/config
[root@master-1 bin]# cat >environment.sh<<EOFL
# Create the kubelet bootstrapping kubeconfig
BOOTSTRAP_TOKEN=f89a76f197526a0d4bc2bf9c86e871c3
KUBE_APISERVER="https://192.168.91.254:6443"
# Set the cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=\${KUBE_APISERVER} \
  --kubeconfig=bootstrap.kubeconfig
# Set the client credentials
kubectl config set-credentials kubelet-bootstrap \
  --token=\${BOOTSTRAP_TOKEN} \
  --kubeconfig=bootstrap.kubeconfig
# Set the context
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=bootstrap.kubeconfig
# Use the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
#Running bash environment.sh produces the bootstrap.kubeconfig file.
EOFL
#Run the script
[root@master-1 bin]# sh environment.sh

10.1.3 Create the kube-proxy kubeconfig file (master-1)

[root@master-1 bin]# cat  >env_proxy.sh<<EOF
# Create the kube-proxy kubeconfig
BOOTSTRAP_TOKEN=f89a76f197526a0d4bc2bf9c86e871c3
KUBE_APISERVER="https://192.168.91.254:6443"
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=\${KUBE_APISERVER} \
  --kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy \
  --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
  --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
EOF
#Run the script
[root@master-1 bin]# sh env_proxy.sh

10.1.4 Copy the kubeconfig files and certificates to all Node nodes

#Copy bootstrap.kubeconfig and kube-proxy.kubeconfig to every Node node
#Create the directories remotely (master-1)
[root@master-1 bin]# for i in node-1 node-2;do ssh $i "mkdir -p /etc/kubernetes/{cfg,ssl}";done
#Copy the ssl certificates (master-1)
[root@master-1 config]# for i in node-1 node-2;do scp /etc/kubernetes/ssl/* $i:/etc/kubernetes/ssl/;done
#Copy the kubeconfig files (master-1)
[root@master-1 bin]# cd /root/config
[root@master-1 config]# for i in node-1 node-2;do scp -rp bootstrap.kubeconfig kube-proxy.kubeconfig $i:/etc/kubernetes/cfg/;done

10.1.5 Create the kubelet parameter config file

#The address differs for each Node node (run on the node)
[root@node-1 bin]# cat >/etc/kubernetes/cfg/kubelet.config<<EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 192.168.91.21
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS: ["10.0.0.2"]
clusterDomain: cluster.local.
failSwapOn: false
authentication:
  anonymous:
    enabled: true
EOF

10.1.6 Create the kubelet config file

#The hostname-override differs for each Node node
#/etc/kubernetes/cfg/kubelet.kubeconfig is generated automatically
[root@node-1 bin]# cat >/etc/kubernetes/cfg/kubelet<<EOF
KUBELET_OPTS="--logtostderr=true \
--v=4 \
--hostname-override=192.168.91.21 \
--kubeconfig=/etc/kubernetes/cfg/kubelet.kubeconfig \
--bootstrap-kubeconfig=/etc/kubernetes/cfg/bootstrap.kubeconfig \
--config=/etc/kubernetes/cfg/kubelet.config \
--cert-dir=/etc/kubernetes/ssl \
--pod-infra-container-image=docker.io/kubernetes/pause:latest"
EOF

10.1.7 Create the kubelet systemd unit file (node nodes)

[root@node-1 bin]# cat >/usr/lib/systemd/system/kubelet.service<<EOF
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service
[Service]
EnvironmentFile=/etc/kubernetes/cfg/kubelet
ExecStart=/usr/local/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
KillMode=process
[Install]
WantedBy=multi-user.target
EOF

10.1.8 Bind the kubelet-bootstrap user to the system cluster role

#Run on master-1
[root@master-1 bin]# kubectl create clusterrolebinding kubelet-bootstrap \
  --clusterrole=system:node-bootstrapper \
  --user=kubelet-bootstrap

10.1.9 Start the kubelet service (node nodes)

[root@node-1 bin]# chkconfig kubelet on
[root@node-1 bin]# service kubelet start
[root@node-1 bin]# service kubelet status

10.2 Approve and inspect CSR requests on the server side

#View the CSR requests
#Run on the Master-1 node
[root@master1 cfg]# kubectl get csr
NAME                                             AGE     REQUESTOR           CONDITION
node-csr-4_tHtI9Y1ZOd1V3ZF5URGT7bWuRZWOizZYgeaBiAHOY   9m40s   kubelet-bootstrap   Pending
node-csr-bvq5buFKqAMvdJWOUjjP7hdez3xkQq5DPC4nNIL2vQs   9m37s   kubelet-bootstrap   Pending

10.2.1 Approve the requests

#Run on the Master node
[root@master-1 bin]# kubectl certificate approve node-csr-4_tHtI9Y1ZOd1V3ZF5URGT7bWuRZWOizZYgeaBiAHOY
[root@master-1 bin]# kubectl certificate approve node-csr-bvq5buFKqAMvdJWOUjjP7hdez3xkQq5DPC4nNIL2vQs
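#After approval the requests should show as Approved,Issued
[root@master-1 bin]# kubectl get csr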

10.3 Handling duplicate node names

#If a node name collides, delete the certificate request and apply again
#Delete the CSR on the Master node
[root@master-1 bin]# kubectl delete csr node-csr-U4v31mc3j_xPq5n1rU2KdpyugqfFH_0g1wOC66oiu04
#Delete kubelet.kubeconfig on the Node node
#Restart kubelet on the client and request a new certificate
[root@node-1 bin]# rm -rf /etc/kubernetes/cfg/kubelet.kubeconfig

10.4 Check the node status

#Every Node must report Ready (run on a master)
[root@master-1 ~]# kubectl get nodes
NAME     STATUS   ROLES    AGE   VERSION
node-1   Ready    <none>   8s    v1.15.1
node-2   Ready    <none>   16s   v1.15.1

10.5 Deploy the kube-proxy component

# kube-proxy runs on every Node node; it watches the apiserver for Service and Endpoint changes and creates the routing rules that load-balance traffic to services.


10.5.1 Create the kube-proxy config file

#Adjust the hostname-override address; it differs per node.
[root@node-1 ~]# cat >/etc/kubernetes/cfg/kube-proxy<<EOF
KUBE_PROXY_OPTS="--logtostderr=true \
--v=4 \
--metrics-bind-address=0.0.0.0 \
--hostname-override=192.168.91.21 \
--cluster-cidr=10.0.0.0/24 \
--kubeconfig=/etc/kubernetes/cfg/kube-proxy.kubeconfig"
EOF

10.5.2 Create the kube-proxy systemd unit file

[root@node-1 ~]# cat >/usr/lib/systemd/system/kube-proxy.service<<EOF
[Unit]
Description=Kubernetes Proxy
After=network.target
[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-proxy
ExecStart=/usr/local/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF

10.5.3 Start the kube-proxy service

[root@node-1 ~]# chkconfig kube-proxy on
[root@node-1 ~]# service kube-proxy start
[root@node-1 ~]# service kube-proxy status

11 Run a demo application

[root@master-1 soft]# kubectl run nginx --image=nginx --replicas=2
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/nginx created
#Get the pod IPs and the nodes they run on
[root@master-1 ~]# kubectl get pods -o wide
#Expose the deployment as a NodePort Service
[root@master-1 ~]# kubectl expose deployment nginx --port=88 --target-port=80 --type=NodePort
#Check the pod status
[root@master-1 ~]# kubectl describe pod nginx-7bb7cd8db5-g7ms2

11.1 Check the Service

[root@master-1 cfg]# kubectl get svc
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.0.0.1     <none>        443/TCP        45m
nginx        NodePort    10.0.0.93    <none>        88:43404/TCP   3s

11.2 Access the web page

[root@master-1 cfg]# curl http://192.168.91.21:43404
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>

11.3 Delete the demo

[root@master-1 cfg]# kubectl delete deployment nginx 
[root@master-1 cfg]# kubectl delete pods nginx
[root@master-1 cfg]# kubectl delete svc -l run=nginx
[root@master-1 cfg]# kubectl delete deployment.apps/nginx

11.5 Service start and stop order

11.5.1 Start the Master nodes

[root@master-1 cfg]# service keepalived start
[root@master-1 cfg]# service etcd start
[root@master-1 cfg]# service kube-scheduler start
[root@master-1 cfg]# service kube-controller-manager start
[root@master-1 ~]# service kube-apiserver  restart
[root@master-1 cfg]# kubectl get cs

11.5.2 Start the Node nodes

[root@node-1 cfg]# service flanneld start

[root@node-1 cfg]# service docker start

[root@node-1 cfg]# service kubelet start

[root@node-1 cfg]# service kube-proxy start


11.5.3 Stop the Node nodes

[root@node-1 cfg]# service kubelet stop

[root@node-1 cfg]# service kube-proxy stop

[root@node-1 cfg]# service docker stop

[root@node-1 cfg]# service flanneld stop


11.5.4 Stop the Master nodes

[root@master-1 cfg]# service kube-controller-manager stop

[root@master-1 cfg]# service kube-scheduler stop

[root@master-1 cfg]# service etcd stop

[root@master-1 cfg]# service keepalived stop

12 Deploy DNS

12.1 Deploy coredns

[root@master-1 cfg]# mkdir /root/dns && cd /root/dns
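#coredns.yaml is assumed to already be prepared in this directory; one common way to generate it (repo and flags assumed)
#is the deploy.sh helper from the coredns/deployment project, pointed at the cluster DNS IP configured for kubelet:
[root@master-1 dns]# git clone https://github.com/coredns/deployment.git
[root@master-1 dns]# ./deployment/kubernetes/deploy.sh -i 10.0.0.2 -d cluster.local > coredns.yaml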
[root@master-1 cfg]# kubectl apply -f coredns.yaml
#List pods in all namespaces
[root@master-1 dns]# kubectl get pod -A
#List pods in a specific namespace
[root@master-1 dns]# kubectl get pod -n kube-system
#Inspect the running coredns pod
[root@master-1 dns]# kubectl describe pod coredns-66db855d4d-26bvw  -n kube-system

12.2 Check the Service

[root@master1 kubernetes]# kubectl get svc -o wide -n=kube-system
NAME             TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE   SELECTOR
kube-dns           ClusterIP   10.0.0.254   <none>        53/UDP,53/TCP,9153/TCP   27s   k8s-app=kube-dns

12.3 Verify that DNS works

12.3.1 Delete the nginx demo created earlier

[root@master-1 cfg]#kubectl delete deployment nginx 
[root@master-1 cfg]#kubectl delete pods nginx
[root@master-1 cfg]#kubectl delete svc -l run=nginx
[root@master-1 cfg]#kubectl delete deployment.apps/nginx

12.3.2 Start a test container

[root@master-1 nginx]# kubectl run -it --rm --restart=Never --image=infoblox/dnstools:latest dnstools
#This may fail with the following error
error: unable to upgrade connection: Forbidden (user=system:anonymous, verb=create, resource=nodes, subresource=proxy)
#Fix
[root@master-1 nginx]# kubectl create clusterrolebinding system:anonymous --clusterrole=cluster-admin --user=system:anonymous
[root@master-1 dns]# kubectl delete pod  dnstools
[root@master-1 nginx]# kubectl run -it --rm --restart=Never --image=infoblox/dnstools:latest dnstools

12.3.3 Create Nginx containers

[root@master-1 ~]# kubectl run nginx --image=nginx --replicas=2
#Create a Service (cluster IP) for the nginx deployment: port 88 externally, targeting container port 80
#--type can be ClusterIP (default), NodePort, LoadBalancer, or ExternalName; NodePort is used here
[root@master-1 ~]# kubectl expose deployment nginx --port=88 --target-port=80 --type=NodePort

12.3.4 Check the Service

[root@master-1 ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.0.0.1     <none>        443/TCP        158m
nginx        NodePort    10.0.0.55    <none>        88:35638/TCP   5s

12.3.5 Test DNS resolution of the Nginx Service

#Test resolving nginx
#DNS resolves the Service name (not Pod names)
dnstools# nslookup nginx
Server:         10.0.0.2
Address:        10.0.0.2#53
Name:   nginx.default.svc.cluster.local
Address: 10.0.0.55

12.3.6 Example: pod network access is not restricted by namespace

#A service in the kube-system ns can be reached from the default ns
[root@master-1 ~]# kubectl run nginx-n1 --image=nginx --replicas=1 -n kube-system
# Create a service for an nginx deployment, which serves on port 99 and connects to the containers on port 80.
#Check the pod status (specific namespace)
[root@master-3 ~]# kubectl get pods -n kube-system
#Check pods and services (all namespaces)
[root@master-2 ~]# kubectl get pod,svc -A
[root@master-1 ~]# kubectl expose deployment nginx-n1 --port=99 --target-port=80 -n kube-system

12.3.7 Access the service across namespaces

[root@master-1 dns]# kubectl get svc -n kube-system | grep nginx-n1                     
nginx12    ClusterIP   10.0.0.196   <none>        80/TCP                   4m
#Access the service
dnstools# curl 10.0.0.196
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;

12.3.8 When the short name does not resolve

dnstools# nslookup nginx-n1
Server:         10.0.0.2
Address:        10.0.0.2#53
** server can't find nginx-n1: NXDOMAIN
dnstools# nslookup nginx-n1
Server:         10.0.0.2
Address:        10.0.0.2#53
#Fix: unqualified names resolve in the pod's own (default) namespace, so use the fully qualified service name
dnstools# nslookup nginx-n1.kube-system.svc.cluster.local
Server:         10.0.0.2
Address:        10.0.0.2#53
Name:   nginx-n1.kube-system.svc.cluster.local
Address: 10.0.0.196

13 Deploy the Dashboard

13.1 Download the manifest

13.1.1 Create a directory (master-1 node)

[root@master-1 ~]# mkdir /root/dashboard

[root@master-1 ~]# cd /root/dashboard
[root@master-1 dashboard]# wget https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml -O kubernetes-dashboard.1.10.yaml
13.1.2 Modify the port

#Expose the dashboard on NodePort 50000
#The default image registry may be unreachable; replace the image with this mirror instead:
mirrorgooglecontainers/kubernetes-dashboard-amd64:v1.10.1
[root@master-1 dashboard]# sed -i '/targetPort:/a\ \ \ \ \ \ nodePort: 50000\n\ \ type: NodePort' kubernetes-dashboard.1.10.yaml
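The image replacement mentioned above can be scripted as well (the original image name inside the manifest is assumed):
[root@master-1 dashboard]# sed -i 's#k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1#mirrorgooglecontainers/kubernetes-dashboard-amd64:v1.10.1#g' kubernetes-dashboard.1.10.yaml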

13.1.3 Deploy

[root@master-1 dashboard]# kubectl apply -f kubernetes-dashboard.1.10.yaml


13.1.4 Check the service port

[root@master-1 dashboard]# kubectl get services -n kube-system
NAME                   TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE
coredns                ClusterIP   10.0.0.2     <none>        53/UDP,53/TCP,9153/TCP   19d
kubelet                ClusterIP   None         <none>        10250/TCP                12d
kubernetes-dashboard   NodePort    10.0.0.218   <none>        443:50000/TCP            16s

13.1.5 Create a service account and grant permissions

[root@master-1 dashboard]# kubectl create serviceaccount  dashboard-admin -n kube-system
[root@master-1 dashboard]# kubectl create clusterrolebinding  \
dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin

13.1.6 Get the login Token

#Get the token

[root@master-1 ~]# kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')


13.1.7 Log in

#If another version of the dashboard was installed previously, switch to a different node IP to access it

#Create a VM snapshot here: Kubernetes Basic


14 Deploy Ingress

#Reverse proxy for services

#Deploy Traefik 2.0


14.1 Create the traefik-crd.yaml file (master-1)

[root@master-1 ~]# mkdir /root/ingress && cd /root/ingress
[root@master-1 ~]# vim traefik-crd.yaml
## IngressRoute
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: ingressroutes.traefik.containo.us
spec:
  scope: Namespaced
  group: traefik.containo.us
  version: v1alpha1
  names:
    kind: IngressRoute
    plural: ingressroutes
    singular: ingressroute
---
## IngressRouteTCP
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: ingressroutetcps.traefik.containo.us
spec:
  scope: Namespaced
  group: traefik.containo.us
  version: v1alpha1
  names:
    kind: IngressRouteTCP
    plural: ingressroutetcps
    singular: ingressroutetcp
---
## Middleware
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: middlewares.traefik.containo.us
spec:
  scope: Namespaced
  group: traefik.containo.us
  version: v1alpha1
  names:
    kind: Middleware
    plural: middlewares
    singular: middleware
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: tlsoptions.traefik.containo.us
spec:
  scope: Namespaced
  group: traefik.containo.us
  version: v1alpha1
  names:
    kind: TLSOption
    plural: tlsoptions
    singular: tlsoption

14.1.1 Create the Traefik CRD resources (master-1)

[root@master-1 ~]#  cd /root/ingress

[root@master-1 ~]#  kubectl create -f traefik-crd.yaml

[root@master-1 ingress]# kubectl get CustomResourceDefinition

NAME                                   CREATED AT

ingressroutes.traefik.containo.us      2020-04-12T03:54:48Z

ingressroutetcps.traefik.containo.us   2020-04-12T03:54:48Z

middlewares.traefik.containo.us        2020-04-12T03:54:48Z

tlsoptions.traefik.containo.us         2020-04-12T03:54:48Z



14.2 Create the Traefik RBAC file (master-1)

[root@master-1 ~]#  vi  traefik-rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  namespace: kube-system
  name: traefik-ingress-controller
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
rules:
  - apiGroups: [""]
    resources: ["services","endpoints","secrets"]
    verbs: ["get","list","watch"]
  - apiGroups: ["extensions"]
    resources: ["ingresses"]
    verbs: ["get","list","watch"]
  - apiGroups: ["extensions"]
    resources: ["ingresses/status"]
    verbs: ["update"]
  - apiGroups: ["traefik.containo.us"]
    resources: ["middlewares"]
    verbs: ["get","list","watch"]
  - apiGroups: ["traefik.containo.us"]
    resources: ["ingressroutes"]
    verbs: ["get","list","watch"]
  - apiGroups: ["traefik.containo.us"]
    resources: ["ingressroutetcps"]
    verbs: ["get","list","watch"]
  - apiGroups: ["traefik.containo.us"]
    resources: ["tlsoptions"]
    verbs: ["get","list","watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik-ingress-controller
subjects:
  - kind: ServiceAccount
    name: traefik-ingress-controller
    namespace: kube-system

14.2.1 Create the RBAC resources

[root@master-1 ingress]# kubectl create -f traefik-rbac.yaml

14.3 Create the Traefik ConfigMap (master-1)

[root@master-1 ~]#  vi traefik-config.yaml 
kind: ConfigMap
apiVersion: v1
metadata:
  name: traefik-config
data:
  traefik.yaml: |-
    serversTransport:
      insecureSkipVerify: true
    api:
      insecure: true
      dashboard: true
      debug: true
    metrics:
      prometheus: ""
    entryPoints:
      web:
        address: ":80"
      websecure:
        address: ":443"
    providers:
      kubernetesCRD: ""
    log:
      filePath: ""
      level: error
      format: json
    accessLog:
      filePath: ""
      format: json
      bufferingSize: 0
      filters:
        retryAttempts: true
        minDuration: 20
      fields:
        defaultMode: keep
        names:
          ClientUsername: drop
        headers:
          defaultMode: keep
          names:
            User-Agent: redact
            Authorization: drop
            Content-Type: keep

14.3.1 Apply the Traefik ConfigMap

[root@master-1 ~]#  kubectl apply -f traefik-config.yaml -n kube-system


14.4 Label the nodes

#Set the node labels

[root@master-1 ingress]# kubectl label nodes 192.168.91.21 IngressProxy=true

[root@master-1 ingress]# kubectl label nodes 192.168.91.22 IngressProxy=true


14.4.1 Check the node labels

#Verify the labels were applied

[root@master-1 ingress]# kubectl get nodes --show-labels


14.5 Create the Traefik deployment file

#Ports 80 and 443 must not be in use on any Node node
[root@master-1 ingress]# netstat -antupl | grep -E "80|443"
[root@master-1 ingress]# vi traefik-deploy.yaml
apiVersion: v1
kind: Service
metadata:
  name: traefik
spec:
  ports:
    - name: web
      port: 80
    - name: websecure
      port: 443
    - name: admin
      port: 8080
  selector:
    app: traefik
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: traefik-ingress-controller
  labels:
    app: traefik
spec:
  selector:
    matchLabels:
      app: traefik
  template:
    metadata:
      name: traefik
      labels:
        app: traefik
    spec:
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 1
      containers:
        - image: traefik:latest
          name: traefik-ingress-lb
          ports:
            - name: web
              containerPort: 80
              hostPort: 80 
            - name: websecure
              containerPort: 443
              hostPort: 443
            - name: admin
              containerPort: 8080
          resources:
            limits:
              cpu: 2000m
              memory: 1024Mi
            requests:
              cpu: 1000m
              memory: 1024Mi
          securityContext:
            capabilities:
              drop:
                - ALL
              add:
                - NET_BIND_SERVICE
          args:
            - --configfile=/config/traefik.yaml
          volumeMounts:
            - mountPath: "/config"
              name: "config"
      volumes:
        - name: config
          configMap:
            name: traefik-config 
      tolerations: 
        - operator: "Exists"
      nodeSelector: 
        IngressProxy: "true"

14.5.1 Deploy the Traefik resources

[root@master-1 ingress]#  kubectl apply -f traefik-deploy.yaml -n kube-system
#Check the running status
[root@master-1 ingress]# kubectl get DaemonSet -A     
NAMESPACE   NAME                         DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR       AGE
default     traefik-ingress-controller   0         0         0       0            0           IngressProxy=true   2m9s


Troubleshooting:

#If the DaemonSet reports RBAC/permission errors, apply the RBAC manifest in the correct namespace
[root@master-1 ingress]# kubectl apply -f traefik-default-rbac.yaml

#If the ConfigMap is reported as missing, re-apply it
[root@master-1 ingress]#  kubectl apply -f traefik-config.yaml

#After that the DaemonSet pods should show as running

14.6 Traefik routing configuration

14.6.1 Configure the Traefik Dashboard route

[root@master-1 ingress]#  vi traefik-dashboard-route.yaml
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: traefik-dashboard-route
  namespace: kube-system
spec:
  entryPoints:
    - web
  routes:
    - match: Host(`ingress.abcd.com`)
      kind: Rule
      services:
        - name: traefik
          port: 8080
#Create the IngressRoute (traefik)
[root@master-1 ingress]#  kubectl apply -f traefik-dashboard-route.yaml

14.6.2 Access the Traefik Dashboard from a client


14.6.2.1 Bind the host name on the physical machine (hosts file or DNS)

/etc/hosts

192.168.91.21 ingress.abcd.com


14.6.2.2 Access the web UI



14.7 Deploy and expose an HTTP service

#Create an nginx service
[root@master-1 ingress]#  kubectl run nginx-ingress-demo1 --image=nginx --replicas=1 -n kube-system
[root@master-1 ingress]#  kubectl expose deployment nginx-ingress-demo1 --port=1099 --target-port=80 -n kube-system
#Create the nginx IngressRoute
[root@master-1 ingress]# vim nginx-ingress-demo-route1.yaml
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: traefik-nginx-demo-route1
  namespace: kube-system
spec:
  entryPoints:
    - web
  routes:
    - match: Host(`nginx11.abcd.com`)
      kind: Rule
      services:
        - name: nginx-ingress-demo1
          port: 1099
#Create the route
[root@master-1 ingress]# kubectl  apply -f nginx-ingress-demo-route1.yaml
[root@master-1 ingress]# kubectl get IngressRoute -A
NAMESPACE     NAME                       AGE
default       traefik-dashboard-route    48m
kube-system   traefik-nginx-demo-route   68s
#Access the service
#Add a hosts entry on the physical machine
192.168.91.21 nginx11.abcd.com
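With that hosts entry in place the route can be checked from the physical machine; alternatively curl can pass the Host header directly without editing hosts:
curl -H "Host: nginx11.abcd.com" http://192.168.91.21/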

14.8 Expose an HTTPS service

#Proxy the dashboard's https service
# Create a self-signed certificate
[root@master-1 ingress]#  cd /root/ingress
[root@master-1 ingress]#  openssl req -x509 -nodes -days 3650 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=cloud.abcd.com"
#Store the certificate in a Kubernetes Secret
[root@master-1 ingress]#  kubectl create secret tls dashboard-tls --key=tls.key --cert=tls.crt -n kube-system
#List the secrets
[root@master-1 ingress]# kubectl get secret
NAME                                     TYPE                                  DATA   AGE
default-token-l77nw                      kubernetes.io/service-account-token   3      6d22h
traefik-ingress-controller-token-pdbhn   kubernetes.io/service-account-token   3      132m
#Create the route file
#First check which namespace the kubernetes dashboard runs in
[root@master-1 ingress]# cat kubernetes-dashboard-route.yaml 
#Note the namespace
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: kubernetes-dashboard-route
  namespace: kubernetes-dashboard
spec:
  entryPoints:
    - websecure
  tls:
    secretName: dashboard-tls
  routes:
    - match: Host(`cloud.abcd.com`) 
      kind: Rule
      services:
        - name: kubernetes-dashboard
          port: 443
#Create the Kubernetes Dashboard routing rule objects
[root@master-1 ingress]# kubectl create ns kubernetes-dashboard
[root@master-1 ingress]# kubectl apply  -f kubernetes-dashboard-route.yaml
#Check the created routes
[root@master-1 ingress]#  kubectl get IngressRoute -A                     
NAMESPACE              NAME                         AGE
default                traefik-dashboard-route      125m
kube-system            traefik-nginx-demo-route     77m
kube-system            traefik-nginx-demo-route1    3m5s
kubernetes-dashboard   kubernetes-dashboard-route   13s
#Add a hosts entry and access it
192.168.91.21  cloud.abcd.com
Once this is configured, open https://cloud.abcd.com in a browser to reach the Dashboard.

14.9 Expose a TCP service through Traefik

#Modify the Traefik config file
#traefik-config.yaml
    entryPoints:
      web:
        address: ":80"
      websecure:
        address: ":443"
      redistcp:
        address: ":6379"
#Apply the config
[root@master-1 ingress]# kubectl apply -f traefik-config.yaml -n kube-system
#Modify the deployment file
#traefik-deploy.yaml
      containers:
          ports:
            - name: web
              containerPort: 80
              hostPort: 80
            - name: websecure
              containerPort: 443
              hostPort: 443
            - name: admin
              containerPort: 8080
            - name: redistcp
              containerPort: 6379
              hostPort: 6379
#Apply the deployment
[root@master-1 ingress]# kubectl apply -f traefik-deploy.yaml -n kube-system
#Redis deployment and service
[root@master-1 ingress]# cat redis-tcp-deploy.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: redis-tcp
spec:
  template:
    metadata:
      labels:
        app: redis-tcp
    spec:
      containers:
      - name: redis-tcp
        image: redis
        ports:
        - containerPort: 6379
          protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
  name: redis-tcp-svc
spec:
  ports:
  - port: 6379
    targetPort: 6379
  selector:
    app: redis-tcp
#Deploy redis
[root@master-1 ingress]# kubectl apply -f redis-tcp-deploy.yaml 
deployment.extensions/redis-tcp unchanged
service/redis-tcp-svc unchanged
#Configure the TCP route
[root@master-1 ingress]# cat  traefik-redis-tcp-route.yaml
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRouteTCP
metadata:
  name: redis-tcp-ingress
spec:
  entryPoints:
    - redistcp
  routes:
  - match: HostSNI(`*`)
    services:
    - name: redis-tcp-svc
      port: 6379
      weight: 10
      terminationDelay: 400
#Apply the route
[root@master-1 ingress]# kubectl apply -f traefik-redis-tcp-route.yaml
#Check it in the Traefik dashboard
#Bind any host name to a node IP for the test
#192.168.91.21 redis.cc.com
[root@master-2 ~]# redis-cli -h redis.cc.com -p 6379
redis.cc.com:6379> set a 12131
OK
redis.cc.com:6379> get a
"12131"

15 Deploy the monitoring system

15.2 Monitoring architecture

Data read flow:

15.1 Install the NFS server

# Used as storage by Prometheus and Grafana

15.1.1 Install nfs on the master node

[root@master-1 ~]#  yum -y install nfs-utils


15.1.2 Create the nfs directory

[root@master-1 ~]#  mkdir -p /ifs/kubernetes


15.1.3 Set permissions

[root@master-1 ~]#  chmod -R 777 /ifs/kubernetes


15.1.4 Edit the exports file

[root@master-1 ~]#  vim /etc/exports

/ifs/kubernetes *(rw,no_root_squash,sync)


15.1.5 Adjust the rpcbind socket unit

#Modify the socket unit file
[root@master-1 ~]# cat >/etc/systemd/system/sockets.target.wants/rpcbind.socket<<EOFL
[Unit]
Description=RPCbind Server Activation Socket
[Socket]
ListenStream=/var/run/rpcbind.sock
ListenStream=0.0.0.0:111
ListenDatagram=0.0.0.0:111
[Install]
WantedBy=sockets.target
EOFL

15.1.6 Apply the export configuration

[root@master-1 ~]#  exportfs -f


15.1.7 Start the rpcbind and nfs services

[root@master-1 ~]#  systemctl restart rpcbind

[root@master-1 ~]#  systemctl enable rpcbind

[root@master-1 ~]#  systemctl restart nfs

[root@master-1 ~]#  systemctl enable nfs


15.1.8 Test with showmount (master-1)

[root@master-1 ~]# showmount -e 192.168.91.18

Export list for 192.168.91.18:

/ifs/kubernetes *


15.1.9 Install the NFS client on all node nodes

[root@master-1 ~]# yum -y install nfs-utils


15.2.0 Check from all Nodes

#Every node must be able to see the export; mounting only works if it is visible.

[root@node-1 ~]# showmount -e 192.168.91.18

Export list for 192.168.91.18:

/ifs/kubernetes *



15.2.1 Deploy the NFS client provisioner (StorageClass)

#Adjust the NFS server address first
[root@master-1 nfs]#  kubectl apply  -f  nfs-class.yaml 
#Adjust the NFS IP address in nfs-deployment.yaml as well
[root@master-1 ~]# kubectl apply  -f  nfs-deployment.yaml
[root@master-1 ~]# kubectl apply  -f  nfs-rabc.yaml
#Check the nfs provisioner pod status
[root@master-1 nfs]# kubectl get pods
NAME                                      READY   STATUS    RESTARTS   AGE
nfs-client-provisioner-59dbcb48b9-s8np8   1/1     Running   0          11s

15.2.2 Verify the deployment

[root@master-1 nfs]# kubectl get StorageClass

NAME                  PROVISIONER      AGE

managed-nfs-storage   fuseim.pri/ifs   33s
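
A quick way to confirm that dynamic provisioning actually works is a throw-away PVC against the new StorageClass (the names below are only an example):

[root@master-1 nfs]# cat > test-claim.yaml <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim
spec:
  storageClassName: managed-nfs-storage
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
EOF
[root@master-1 nfs]# kubectl apply -f test-claim.yaml
#The PVC should become Bound and a directory should appear under /ifs/kubernetes on the NFS server
[root@master-1 nfs]# kubectl get pvc test-claim
[root@master-1 nfs]# kubectl delete -f test-claim.yaml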


15.2.3 Check in the web UI



15.3 Deploy the monitoring stack

Note the configuration files that need to be modified
#Change the IPs below to match this environment
[root@master-1 serviceMonitor]# cd serviceMonitor
[root@master-1 serviceMonitor]# ls | xargs grep 91
prometheus-EtcdService.yaml:  - ip: 192.168.91.18
prometheus-EtcdService.yaml:  - ip: 192.168.91.19
prometheus-EtcdService.yaml:  - ip: 192.168.91.20
prometheus-kubeControllerManagerService.yaml:  - ip: 192.168.91.18
prometheus-kubeControllerManagerService.yaml:  - ip: 192.168.91.19
prometheus-kubeControllerManagerService.yaml:  - ip: 192.168.91.20
prometheus-KubeProxyService.yaml:  - ip: 192.168.91.21
prometheus-KubeProxyService.yaml:  - ip: 192.168.91.22
prometheus-kubeSchedulerService.yaml:  - ip: 192.168.91.18
prometheus-kubeSchedulerService.yaml:  - ip: 192.168.91.19
prometheus-kubeSchedulerService.yaml:  - ip: 192.168.91.20
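If your cluster uses different addresses, the sample IPs can be replaced in bulk instead of editing each file by hand; a sed sketch with placeholder values (adjust the mapping before running):
[root@master-1 serviceMonitor]# sed -i 's/192\.168\.91\.18/<master-1-ip>/g; s/192\.168\.91\.19/<master-2-ip>/g; s/192\.168\.91\.20/<master-3-ip>/g' *.yaml
[root@master-1 serviceMonitor]# sed -i 's/192\.168\.91\.21/<node-1-ip>/g; s/192\.168\.91\.22/<node-2-ip>/g' *.yaml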
#Create the base setup (permissions) and the alertmanager service
[root@master-1 monitor]#  kubectl apply -f setup/
[root@master-1 monitor]#  kubectl apply -f alertmanager/
#Pull the required images first
[root@master-1 prometheus]# cd node-exporter
[root@master-1 node-exporter]# ls | xargs grep image
node-exporter-daemonset.yaml:        image: prom/node-exporter:v0.18.1
node-exporter-daemonset.yaml:        image: quay.io/coreos/kube-rbac-proxy:v0.4.1
#Pull the images on the node machines
[root@node-1 ~]# docker pull quay.io/coreos/kube-rbac-proxy:v0.4.1
[root@node-1 ~]# docker pull prom/node-exporter:v0.18.1
[root@master-1 monitor]#  kubectl apply -f node-exporter/
#Pull the images on the node machines
[root@node-1 ~]# docker pull quay.io/coreos/kube-state-metrics:v1.8.0
[root@master-1 monitor]#  kubectl apply -f kube-state-metrics/
#Pull the images on the node machines
[root@node-1 ~]# docker pull  grafana/grafana:6.4.3
[root@master-1 monitor]#  kubectl apply -f grafana/
[root@master-1 monitor]#  kubectl apply -f prometheus/
[root@master-1 monitor]#  kubectl apply -f serviceMonitor/
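Because SSH keys were already distributed to every node, the images above can also be pre-pulled from master-1 in one loop instead of logging in to each node separately; a convenience sketch:
[root@master-1 monitor]# for n in node-1 node-2; do
    ssh $n "docker pull prom/node-exporter:v0.18.1 && \
            docker pull quay.io/coreos/kube-rbac-proxy:v0.4.1 && \
            docker pull quay.io/coreos/kube-state-metrics:v1.8.0 && \
            docker pull grafana/grafana:6.4.3"
done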
Note: if you see RBAC/permission errors, fix them as follows:
#Skip this section if there were no errors
[root@master-1 monitor]#  kubectl create serviceaccount kube-state-metrics -n monitoring
[root@master-1 monitor]#  kubectl create serviceaccount grafana -n monitoring
[root@master-1 monitor]#  kubectl create serviceaccount prometheus-k8s -n monitoring
#Create the RBAC binding files
#Skip this section if there were no errors
#kube-state-metrics
[root@master-1 kube-state-metrics]# cat kube-state-metrics-rabc.yaml
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: kube-state-metrics-rbac
subjects:
  - kind: ServiceAccount
    name: kube-state-metrics
    namespace: monitoring
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
# grafana
[root@master-1 grafana]# cat grafana-rabc.yaml 
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: grafana-rbac
subjects:
  - kind: ServiceAccount
    name: grafana
    namespace: monitoring
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
# prometheus
[root@master-1 grafana]# cat prometheus-rabc.yaml 
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: prometheus-rbac
subjects:
  - kind: ServiceAccount
    name: prometheus-k8s
    namespace: monitoring
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io

15.3.1 Get the Grafana Pod

[root@master-1 ~]# kubectl get pod -A -o wide| grep grafana

monitoring             grafana-5dc77ff8cb-gwnqs                  1/1     Running   0          2d3h   172.17.64.10     192.168.91.146


15.3.2 Get the Grafana Service

[root@master-1 ~]# kubectl get svc -A | grep grafana

monitoring             grafana                   NodePort    10.0.0.57    <none>        3000:45523/TCP               2d3h
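The NodePort above means Grafana is reachable on any node IP at port 45523 (the port is assigned per cluster, so substitute your own):
[root@master-1 ~]# curl -I http://192.168.91.21:45523
#or open http://192.168.91.21:45523 in a browser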


15.3.3 Log in to the Grafana Dashboard

#Default username/password: admin/admin

15.3.4 Select the Data Source

#Get the Prometheus service address
[root@master-1 prometheus]#  kubectl get svc -A  | grep prometheus-k8s
monitoring             prometheus-k8s            NodePort    10.0.0.122   <none>        9090:31626/TCP               21m
#List the ServiceMonitor objects
[root@master-1 ingress]# kubectl get servicemonitor -A
NAMESPACE    NAME                      AGE
monitoring   alertmanager              5d19h
monitoring   coredns                   5d19h
monitoring   grafana                   5d19h
monitoring   kube-apiserver            5d19h
monitoring   kube-controller-manager   5d19h
monitoring   kube-etcd                 5d19h
monitoring   kube-proxy                5d19h
monitoring   kube-scheduler            5d19h
monitoring   kube-state-metrics        5d19h
monitoring   kubelet                   5d19h
monitoring   node-exporter             5d19h
monitoring   prometheus                5d19h
monitoring   prometheus-operator       5d19h
http://192.168.91.21:31626/targets
#Add the Ingress controller to the monitoring system
#Edit the Traefik Service definition
[root@master-1 ingress]# cat traefik-deploy.yaml
apiVersion: v1
kind: Service
metadata:
  name: traefik
  labels:                       
    app: traefik-metrics
spec:
  ports:
    - name: web
      port: 80
    - name: websecure
      port: 443
    - name: admin
      port: 8080
  selector:
    app: traefik
……………………..
#Note the namespace
[root@master-1 ingress]#  kubectl apply -f traefik-deploy.yaml -n kube-system
#Inspect the Service
[root@master-1 ingress]# kubectl  describe svc traefik -n kube-system
Name:              traefik
Namespace:         kube-system
Labels:            app=traefik-metrics
Annotations:       kubectl.kubernetes.io/last-applied-configuration:
                     {"apiVersion":"v1","kind":"Service","metadata":{"annotations":{},"labels":{"app":"traefik-metrics"},"name":"traefik","namespace":"default"...
Selector:          app=traefik
Type:              ClusterIP
IP:                10.0.0.3
Port:              web  80/TCP
TargetPort:        80/TCP
Endpoints:         172.17.90.14:80,172.17.98.5:80
Port:              websecure  443/TCP
TargetPort:        443/TCP
Endpoints:         172.17.90.14:443,172.17.98.5:443
Port:              admin  8080/TCP
TargetPort:        8080/TCP
Endpoints:         172.17.90.14:8080,172.17.98.5:8080
Session Affinity:  None
Events:            <none>
#Add a ServiceMonitor for Traefik
[root@master-1 ingress]# cat  traefik-serviceMonitor.yaml                  
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: traefik
  namespace: monitoring
  labels:
    app: traefik-metrics
spec:
  jobLabel: app
  endpoints:
  - port: admin
    interval: 10s
    path: '/metrics'
  selector:
    matchLabels:
      app: traefik-metrics
  namespaceSelector:
    matchNames:
    - kube-system
#Create the ServiceMonitor
[root@master-1 ingress]# kubectl apply -f traefik-serviceMonitor.yaml
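Besides the web UI, the new target can be confirmed through the Prometheus HTTP API using the NodePort obtained earlier (31626 here); the job list should now include traefik:
[root@master-1 ingress]# curl -s http://192.168.91.21:31626/api/v1/targets | grep -o '"job":"[^"]*"' | sort -u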
#Check the Prometheus targets page
#Wait about a minute for the target to appear
#Query the traefik metrics
#Add a dashboard to Grafana
#Import the template file Traefik 2-1587191399741.json
#Install the grafana-piechart-panel plugin
[root@master-1 ingress]# kubectl exec -ti -n monitoring grafana-5dc77ff8cb-srd9h /bin/bash
bash-5.0$ grafana-cli plugins install grafana-piechart-panel
#Delete the Grafana pod so it restarts and loads the plugin
[root@master-1 ingress]# kubectl delete pod grafana-5dc77ff8cb-srd9h -n monitoring
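Once the replacement pod is Running, the plugin installation can be confirmed; the new pod will have a different name, so the one below is a placeholder:
[root@master-1 ingress]# kubectl get pods -n monitoring | grep grafana
[root@master-1 ingress]# kubectl exec -ti -n monitoring <new-grafana-pod> -- grafana-cli plugins ls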
#Start a test pod to access the nginx service
[root@master-1 ingress]#  kubectl run -it --rm --restart=Never --image=infoblox/dnstools:latest dnstools
#Hosts bindings inside the test pod
dnstools# cat /etc/hosts
# Kubernetes-managed hosts file.
127.0.0.1       localhost
::1     localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
fe00::0 ip6-mcastprefix
fe00::1 ip6-allnodes
fe00::2 ip6-allrouters
172.17.51.9     dnstools
192.168.91.21 nginx11.abcd.com
#Send traffic to the service through the Ingress
dnstools# for i in `seq 1 10000`;do curl nginx11.abcd.com;done
#View the data in Grafana
#Metrics only appear for services that receive traffic through an Ingress route

16 Container Log Collection Options

※  Package the log agent into the application image

※  Write logs to the host node's filesystem

※  Run a log-collection container on every node

In this example one log-collection pod (log-pilot) is deployed on every node, i.e. the third option.


17 Install the Logging Components

#Create the service account

[root@master-1 java]# kubectl create serviceaccount admin -n kube-system


17.1 Configure RBAC

[root@master-1 logs]# cat es-rbac.yaml 
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: es-rbac
subjects:
  - kind: ServiceAccount
    name: admin 
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
#Apply the RBAC binding
[root@master-1 logs]# kubectl apply -f  es-rbac.yaml

17.2 Install Elasticsearch

[root@master-200 log]# docker pull registry.cn-hangzhou.aliyuncs.com/cqz/elasticsearch:5.5.1
[root@master-200 log]# wget https://acs-logging.oss-cn-hangzhou.aliyuncs.com/elasticsearch.yml (the memory size in this file needs to be adjusted)
#Or load the images from local archives on all the node machines:
[root@master-200 log]# docker load < es_5.5.1.tar
[root@master-200 log]# docker load < kibana_5.5.1.tar
[root@master-200 log]# docker load < log-pilot.tar.gz
[root@master-200 log]# docker tag repo.hostscc.com/elk/elasticsearch:5.5.1  registry.cn-hangzhou.aliyuncs.com/cqz/elasticsearch:5.5.1
[root@master-200 log]# kubectl apply -f elasticsearch.yml
#Check the StatefulSet status
[root@master-1 logs]# kubectl describe  StatefulSet -A

17.3 Check the Elasticsearch Pods in Kubernetes

#Three Elasticsearch nodes are recommended
[root@master-200 log]# kubectl get pods -n kube-system
NAME                                    READY   STATUS            RESTARTS   AGE
elasticsearch-0                         0/1     PodInitializing   0          91s
kubernetes-dashboard-69dcdb65fd-psnq9   1/1     Running           1          32h

17.4 Check the Elasticsearch Cluster Health

[root@master-200 log]#  kubectl exec -it  elasticsearch-0  bash  -n  kube-system 
#Run the health check inside the pod:
#curl http://localhost:9200/_cat/health?v
elasticsearch@elasticsearch-0: $ curl http://localhost:9200/_cat/health?v
epoch      timestamp cluster        status node.total node.data shards pri relo init unassign pending_tasks max_task_wait_time active_shards_percent
1574791667 18:07:47  docker-cluster green           2         2      0   0    0    0        0             0                  -                100.0%
error: unable to upgrade connection: Forbidden (user=system:anonymous, verb=create, resource=nodes, subresource=proxy)
#Fix for the kubectl exec permission error above:
[root@master-200 log]# kubectl create clusterrolebinding system:anonymous --clusterrole=cluster-admin --user=system:anonymous

17.5 Install log-pilot

[root@master-200 log]#  wget https://acs-logging.oss-cn-hangzhou.aliyuncs.com/log-pilot.yml
[root@master-200 log]#  docker pull registry.cn-hangzhou.aliyuncs.com/acs-sample/log-pilot:0.9-filebeat
#On all node machines
[root@node-1 opt]# docker tag log-pilot:latest registry.cn-hangzhou.aliyuncs.com/acs-sample/log-pilot:0.9-filebeat
#Deploy
[root@master-200 log]#  kubectl apply -f log-pilot.yml

17.6 Install Kibana

#Note: adjust the namespace in kibana.yml if necessary

[root@master-200 log]#  wget https://acs-logging.oss-cn-hangzhou.aliyuncs.com/kibana.yml


#On all nodes

[root@node-1 ~]# docker tag repo.hostscc.com/elk/kibana:5.5.1 registry.cn-hangzhou.aliyuncs.com/acs-sample/kibana:5.5.1

#Deploy

[root@master-200 log]#  kubectl apply -f kibana.yml


17.7 Access the Kibana UI

17.7.1 Get the Kibana pod

[root@master-1 logs]# kubectl get pods -o wide --all-namespaces | grep kibana

kube-system            kibana-777bb4dfb-rz5c4                    1/1     Running   0          31s     172.17.89.13    192.168.91.21   <none>           <none>


17.7.2 Get the Kibana NodePort

[root@master-1 logs]# kubectl get svc --all-namespaces | grep kibana

kube-system            kibana                    NodePort    10.0.0.163   <none>        80:37293/TCP                 55s



17.7.3 Access the web UI:

http://192.168.91.21:37293


17.7.4 Example 1: Collect Logs from a Running Container

1. Create the nginx YAML file
#YAML validator: http://www.bejson.com/validators/yaml/
[root@master-1 ~]# mkdir /root/nginx && cd /root/nginx
[root@master-1 nginx]# cat  nginx-demo.yaml                 
apiVersion: apps/v1beta2
kind: Deployment
metadata:
  name: nginx-demo
spec:
  selector:
    matchLabels:
      app: nginx-demo
  replicas: 1
  template:
    metadata:
      labels:
        app: nginx-demo
    spec:
      containers:
      - name: nginx
        image: nginx
        imagePullPolicy: IfNotPresent
        env:
        - name: aliyun_logs_nginx
          value: "stdout"
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-demo-svc
spec:
  selector:
    app: nginx-demo
  ports:
  - port: 80
    targetPort: 80
#Mind the YAML indentation and blank lines
#Create the pod
[root@master-1 nginx]# kubectl apply -f nginx-demo.yaml 
#Check the demo status
[root@master-1 nginx]# kubectl get svc,pods
aliyun_logs_catalina=stdout tells log-pilot to collect the container's stdout log.
aliyun_logs_access=/usr/local/tomcat/logs/catalina.*.log tells it to collect every file under /usr/local/tomcat/logs/ inside the container whose name matches catalina.*.log.
Log-Pilot generates its collection configuration dynamically from environment variables of the form aliyun_logs_$name=$path; a file-log sketch follows below.
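For file logs, log-pilot generally also needs the log directory to sit on a volume it can reach; a minimal container-spec fragment for a hypothetical Tomcat container (the emptyDir volume name is an assumption):
        env:
        - name: aliyun_logs_catalina
          value: "stdout"
        - name: aliyun_logs_access
          value: "/usr/local/tomcat/logs/catalina.*.log"
        volumeMounts:
        - name: tomcat-logs          # emptyDir so log-pilot can discover the files
          mountPath: /usr/local/tomcat/logs
      volumes:
      - name: tomcat-logs
        emptyDir: {}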
2. Create the nginx Ingress route
[root@master-1 java]# cat nginx-route.yaml 
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: nginx-demo-route
spec:
  entryPoints:
    - web
  routes:
    - match: Host(`nginx.cc.com`)
      kind: Rule
      services:
        - name: nginx-demo-svc
          port: 80
#Create the route
[root@master-1 nginx]# kubectl  apply -f nginx-route.yaml
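A quick way to verify the new route before touching any hosts file is to send the Host header directly to a node where Traefik listens on the web entryPoint (port 80):
[root@master-1 nginx]# curl -H 'Host: nginx.cc.com' http://192.168.91.21/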
3. Access the service from a test pod
kubectl run -it --rm --restart=Never --image=infoblox/dnstools:latest dnstools
4. Bind the hostname in the local hosts file
192.168.91.21 nginx.cc.com
5. Open the page in a browser
6. Check the container logs
[root@master-1 nginx]# kubectl logs -f nginx-demo-68749b58dc-rr9rj
172.17.89.0 - - [12/Apr/2020:09:52:01 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.60.0" "-"
172.17.89.6 - - [12/Apr/2020:09:53:27 +0000] "GET / HTTP/1.1" 200 612 "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36" "192.168.91.1"
2020/04/12 09:53:27 [error] 6#6: *2 open() "/usr/share/nginx/html/favicon.ico" failed (2: No such file or directory), client: 172.17.89.6, server: localhost, request: "GET /favicon.ico HTTP/1.1", host: "nginx.cc.com", referrer: "http://nginx.cc.com/"
172.17.89.6 - - [12/Apr/2020:09:53:27 +0000] "GET /favicon.ico HTTP/1.1" 404 555 "http://nginx.cc.com/" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36" "192.168.91.1"
172.17.89.6 - - [12/Apr/2020:09:53:30 +0000] "GET / HTTP/1.1" 304 0 "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36" "192.168.91.1"
172.17.89.6 - - [12/Apr/2020:09:53:30 +0000] "GET / HTTP/1.1" 304 0 "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36" "192.168.91.1"
172.17.89.6 - - [12/Apr/2020:09:53:30 +0000] "GET / HTTP/1.1" 304 0 "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36" "192.168.91.1"
7. Check whether the index has been created
[root@master-1 nginx]# kubectl get pods -n=kube-system | grep elasticsearch
elasticsearch-0                       1/1     Running   0          24m
elasticsearch-1                       1/1     Running   0          18m
[root@master-1 nginx]# kubectl exec -it elasticsearch-0 /bin/bash -n kube-system
elasticsearch@elasticsearch-0:/usr/share/elasticsearch$ curl 'localhost:9200/_cat/indices?v'
health status index            uuid                   pri rep docs.count docs.deleted store.size pri.store.size
green  open   .kibana          WsgdFZhOTqyd9Mgo-Vk6ew   1   1          1            0      6.4kb          3.2kb
green  open   nginx-2020.04.12 EpqezTn7Sx6fCxDIOxhRFQ   5   1          3            0     81.1kb         40.5kb
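The new index can also be queried directly from inside the elasticsearch-0 pod to confirm that documents are arriving:
elasticsearch@elasticsearch-0:/usr/share/elasticsearch$ curl 'localhost:9200/nginx-*/_search?size=1&pretty'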

8. Add the index pattern (e.g. nginx-*) in Kibana




#View the access logs in Kibana


9. Note on multi-line (Java) log collection

Reference: https://www.iyunw.cn/archives/k8s-tong-guo-log-pilot-cai-ji-ying-yong-ri-zhi-ding-zhi-hua-tomcat-duo-xing/
